repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 52-3.87M) | func_documentation_string (stringlengths 1-47.2k) | func_code_url (stringlengths 85-339) |
---|---|---|---|---|---|---|---|
riga/tfdeploy | tfdeploy.py | Unpack | def Unpack(a, num, axis):
"""
Unpack op.
"""
return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis)) | python | def Unpack(a, num, axis):
"""
Unpack op.
"""
return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis)) | Unpack op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1216-L1220 |
riga/tfdeploy | tfdeploy.py | ReverseSequence | def ReverseSequence(a, seq_lengths, seq_dim, batch_dim):
"""
Sequential reverse op.
"""
r = np.copy(a)
invidxs = (len(r.shape) - 1) * [slice(None)]
if seq_dim < batch_dim:
invidxs[seq_dim] = slice(None, None, -1)
else:
invidxs[seq_dim - 1] = slice(None, None, -1)
_invidxs = tuple(invidxs)
selidxs = len(r.shape) * [slice(None)]
for i, l in enumerate(seq_lengths):
if not l:
continue
selidxs[batch_dim] = i
selidxs[seq_dim] = slice(0, l)
_selidxs = tuple(selidxs)
r[_selidxs] = a[_selidxs][_invidxs]
return r, | python | def ReverseSequence(a, seq_lengths, seq_dim, batch_dim):
"""
Sequential reverse op.
"""
r = np.copy(a)
invidxs = (len(r.shape) - 1) * [slice(None)]
if seq_dim < batch_dim:
invidxs[seq_dim] = slice(None, None, -1)
else:
invidxs[seq_dim - 1] = slice(None, None, -1)
_invidxs = tuple(invidxs)
selidxs = len(r.shape) * [slice(None)]
for i, l in enumerate(seq_lengths):
if not l:
continue
selidxs[batch_dim] = i
selidxs[seq_dim] = slice(0, l)
_selidxs = tuple(selidxs)
r[_selidxs] = a[_selidxs][_invidxs]
return r, | Sequential reverse op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1224-L1243 |
riga/tfdeploy | tfdeploy.py | ReverseV2 | def ReverseV2(a, axes):
"""
Reverse op.
"""
idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape)))
return np.copy(a[idxs]), | python | def ReverseV2(a, axes):
"""
Reverse op.
"""
idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape)))
return np.copy(a[idxs]), | Reverse op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1247-L1252 |
riga/tfdeploy | tfdeploy.py | Betainc | def Betainc(a, b, x):
"""
Regularized incomplete beta op.
"""
return sp.special.betainc(a, b, x), | python | def Betainc(a, b, x):
"""
Regularized incomplete beta op.
"""
return sp.special.betainc(a, b, x), | Regularized incomplete beta op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1588-L1592 |
riga/tfdeploy | tfdeploy.py | Diag | def Diag(a):
"""
Diag op.
"""
r = np.zeros(2 * a.shape, dtype=a.dtype)
for idx, v in np.ndenumerate(a):
r[2 * idx] = v
return r, | python | def Diag(a):
"""
Diag op.
"""
r = np.zeros(2 * a.shape, dtype=a.dtype)
for idx, v in np.ndenumerate(a):
r[2 * idx] = v
return r, | Diag op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1600-L1607 |
riga/tfdeploy | tfdeploy.py | MatrixDiagPart | def MatrixDiagPart(a):
"""
Batched diag op that returns only the diagonal elements.
"""
r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.diagonal(a[pos])
return r, | python | def MatrixDiagPart(a):
"""
Batched diag op that returns only the diagonal elements.
"""
r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.diagonal(a[pos])
return r, | Batched diag op that returns only the diagonal elements. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1619-L1627 |
riga/tfdeploy | tfdeploy.py | MatMul | def MatMul(a, b, transpose_a, transpose_b):
"""
Matrix multiplication op.
"""
return np.dot(a if not transpose_a else np.transpose(a),
b if not transpose_b else np.transpose(b)), | python | def MatMul(a, b, transpose_a, transpose_b):
"""
Matrix multiplication op.
"""
return np.dot(a if not transpose_a else np.transpose(a),
b if not transpose_b else np.transpose(b)), | Matrix multiplication op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1631-L1636 |
riga/tfdeploy | tfdeploy.py | MatrixInverse | def MatrixInverse(a, adj):
"""
Matrix inversion op.
"""
return np.linalg.inv(a if not adj else _adjoint(a)), | python | def MatrixInverse(a, adj):
"""
Matrix inversion op.
"""
return np.linalg.inv(a if not adj else _adjoint(a)), | Matrix inversion op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1648-L1652 |
riga/tfdeploy | tfdeploy.py | MatrixSolve | def MatrixSolve(a, rhs, adj):
"""
Matrix solve op.
"""
return np.linalg.solve(a if not adj else _adjoint(a), rhs), | python | def MatrixSolve(a, rhs, adj):
"""
Matrix solve op.
"""
return np.linalg.solve(a if not adj else _adjoint(a), rhs), | Matrix solve op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1664-L1668 |
riga/tfdeploy | tfdeploy.py | MatrixTriangularSolve | def MatrixTriangularSolve(a, rhs, lower, adj):
"""
Matrix triangular solve op.
"""
trans = 0 if not adj else 2
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.conj(a[pos]), rhs[pos],
trans=trans, lower=lower)
return r, | python | def MatrixTriangularSolve(a, rhs, lower, adj):
"""
Matrix triangular solve op.
"""
trans = 0 if not adj else 2
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.conj(a[pos]), rhs[pos],
trans=trans, lower=lower)
return r, | Matrix triangular solve op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1672-L1684 |
riga/tfdeploy | tfdeploy.py | MatrixSolveLs | def MatrixSolveLs(a, rhs, l2_reg):
"""
Matrix least-squares solve op.
"""
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
return r, | python | def MatrixSolveLs(a, rhs, l2_reg):
"""
Matrix least-squares solve op.
"""
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
return r, | Matrix least-squares solve op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1688-L1697 |
riga/tfdeploy | tfdeploy.py | SelfAdjointEig | def SelfAdjointEig(a):
"""
Eigen decomp op.
"""
shape = list(a.shape)
shape[-2] += 1
return np.append(*np.linalg.eig(a)).reshape(*shape), | python | def SelfAdjointEig(a):
"""
Eigen decomp op.
"""
shape = list(a.shape)
shape[-2] += 1
return np.append(*np.linalg.eig(a)).reshape(*shape), | Eigen decomp op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1701-L1707 |
riga/tfdeploy | tfdeploy.py | Svd | def Svd(a, uv, full):
"""
Singular value decomposition op.
"""
u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv)
return s, u, v | python | def Svd(a, uv, full):
"""
Singular value decomposition op.
"""
u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv)
return s, u, v | Singular value decomposition op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1719-L1724 |
riga/tfdeploy | tfdeploy.py | Sum | def Sum(a, axis, keep_dims):
"""
Sum reduction op.
"""
return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Sum(a, axis, keep_dims):
"""
Sum reduction op.
"""
return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Sum reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1804-L1809 |
riga/tfdeploy | tfdeploy.py | Prod | def Prod(a, axis, keep_dims):
"""
Prod reduction op.
"""
return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Prod(a, axis, keep_dims):
"""
Prod reduction op.
"""
return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Prod reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1813-L1818 |
riga/tfdeploy | tfdeploy.py | Min | def Min(a, axis, keep_dims):
"""
Min reduction op.
"""
return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Min(a, axis, keep_dims):
"""
Min reduction op.
"""
return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Min reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1822-L1827 |
riga/tfdeploy | tfdeploy.py | Max | def Max(a, axis, keep_dims):
"""
Max reduction op.
"""
return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Max(a, axis, keep_dims):
"""
Max reduction op.
"""
return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Max reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1831-L1836 |
riga/tfdeploy | tfdeploy.py | Mean | def Mean(a, axis, keep_dims):
"""
Mean reduction op.
"""
return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Mean(a, axis, keep_dims):
"""
Mean reduction op.
"""
return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Mean reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1840-L1845 |
riga/tfdeploy | tfdeploy.py | All | def All(a, axis, keep_dims):
"""
All reduction op.
"""
return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def All(a, axis, keep_dims):
"""
All reduction op.
"""
return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | All reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1849-L1854 |
riga/tfdeploy | tfdeploy.py | Any | def Any(a, axis, keep_dims):
"""
Any reduction op.
"""
return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | python | def Any(a, axis, keep_dims):
"""
Any reduction op.
"""
return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | Any reduction op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1858-L1863 |
riga/tfdeploy | tfdeploy.py | SegmentSum | def SegmentSum(a, ids, *args):
"""
Segmented sum op.
"""
func = lambda idxs: reduce(np.add, a[idxs])
return seg_map(func, a, ids), | python | def SegmentSum(a, ids, *args):
"""
Segmented sum op.
"""
func = lambda idxs: reduce(np.add, a[idxs])
return seg_map(func, a, ids), | Segmented sum op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1881-L1886 |
riga/tfdeploy | tfdeploy.py | SegmentProd | def SegmentProd(a, ids):
"""
Segmented prod op.
"""
func = lambda idxs: reduce(np.multiply, a[idxs])
return seg_map(func, a, ids), | python | def SegmentProd(a, ids):
"""
Segmented prod op.
"""
func = lambda idxs: reduce(np.multiply, a[idxs])
return seg_map(func, a, ids), | Segmented prod op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1890-L1895 |
riga/tfdeploy | tfdeploy.py | SegmentMin | def SegmentMin(a, ids):
"""
Segmented min op.
"""
func = lambda idxs: np.amin(a[idxs], axis=0)
return seg_map(func, a, ids), | python | def SegmentMin(a, ids):
"""
Segmented min op.
"""
func = lambda idxs: np.amin(a[idxs], axis=0)
return seg_map(func, a, ids), | Segmented min op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1899-L1904 |
riga/tfdeploy | tfdeploy.py | SegmentMax | def SegmentMax(a, ids):
"""
Segmented max op.
"""
func = lambda idxs: np.amax(a[idxs], axis=0)
return seg_map(func, a, ids), | python | def SegmentMax(a, ids):
"""
Segmented max op.
"""
func = lambda idxs: np.amax(a[idxs], axis=0)
return seg_map(func, a, ids), | Segmented max op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1908-L1913 |
riga/tfdeploy | tfdeploy.py | SegmentMean | def SegmentMean(a, ids):
"""
Segmented mean op.
"""
func = lambda idxs: np.mean(a[idxs], axis=0)
return seg_map(func, a, ids), | python | def SegmentMean(a, ids):
"""
Segmented mean op.
"""
func = lambda idxs: np.mean(a[idxs], axis=0)
return seg_map(func, a, ids), | Segmented mean op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1917-L1922 |
riga/tfdeploy | tfdeploy.py | SparseSegmentSqrtN | def SparseSegmentSqrtN(a, idxs, ids):
"""
Sparse segmented sum / sqrt(n=len(idxs)) op.
"""
func = lambda _idxs: np.divide(reduce(np.add, a[idxs][_idxs]), np.math.sqrt(len(_idxs)))
return seg_map(func, a, ids), | python | def SparseSegmentSqrtN(a, idxs, ids):
"""
Sparse segmented sum / sqrt(n=len(idxs)) op.
"""
func = lambda _idxs: np.divide(reduce(np.add, a[idxs][_idxs]), np.math.sqrt(len(_idxs)))
return seg_map(func, a, ids), | Sparse segmented sum / sqrt(n=len(idxs)) op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1942-L1947 |
riga/tfdeploy | tfdeploy.py | ListDiff | def ListDiff(a, b):
"""
List diff op.
"""
d = np.setdiff1d(a, b)
return d, np.searchsorted(a, d).astype(np.int32) | python | def ListDiff(a, b):
"""
List diff op.
"""
d = np.setdiff1d(a, b)
return d, np.searchsorted(a, d).astype(np.int32) | List diff op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1971-L1976 |
riga/tfdeploy | tfdeploy.py | Unique | def Unique(a, t):
"""
Unique op.
"""
_, idxs, inv = np.unique(a, return_index=True, return_inverse=True)
return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t]) | python | def Unique(a, t):
"""
Unique op.
"""
_, idxs, inv = np.unique(a, return_index=True, return_inverse=True)
return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t]) | Unique op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1988-L1993 |
riga/tfdeploy | tfdeploy.py | Elu | def Elu(a):
"""
Elu op.
"""
return np.where(a < 0, np.subtract(np.exp(a), 1), a), | python | def Elu(a):
"""
Elu op.
"""
return np.where(a < 0, np.subtract(np.exp(a), 1), a), | Elu op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2037-L2041 |
riga/tfdeploy | tfdeploy.py | Softsign | def Softsign(a):
"""
Softsign op.
"""
return np.divide(a, np.add(np.abs(a), 1)), | python | def Softsign(a):
"""
Softsign op.
"""
return np.divide(a, np.add(np.abs(a), 1)), | Softsign op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2053-L2057 |
riga/tfdeploy | tfdeploy.py | Softmax | def Softmax(a):
"""
Softmax op.
"""
e = np.exp(a)
return np.divide(e, np.sum(e, axis=-1, keepdims=True)), | python | def Softmax(a):
"""
Softmax op.
"""
e = np.exp(a)
return np.divide(e, np.sum(e, axis=-1, keepdims=True)), | Softmax op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2077-L2082 |
riga/tfdeploy | tfdeploy.py | Conv1D | def Conv1D(a, f, strides, padding, data_format):
"""
1D conv op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii"))
conv = np.sum(patches, axis=tuple(range(-f.ndim, -1)))
if data_format.decode("ascii") == "NCHW":
conv = np.rollaxis(conv, -1, 1)
return conv, | python | def Conv1D(a, f, strides, padding, data_format):
"""
1D conv op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii"))
conv = np.sum(patches, axis=tuple(range(-f.ndim, -1)))
if data_format.decode("ascii") == "NCHW":
conv = np.rollaxis(conv, -1, 1)
return conv, | 1D conv op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2125-L2138 |
riga/tfdeploy | tfdeploy.py | Conv3D | def Conv3D(a, f, strides, padding):
"""
3D conv op.
"""
patches = _conv_patches(a, f, strides, padding.decode("ascii"))
return np.sum(patches, axis=tuple(range(-f.ndim, -1))), | python | def Conv3D(a, f, strides, padding):
"""
3D conv op.
"""
patches = _conv_patches(a, f, strides, padding.decode("ascii"))
return np.sum(patches, axis=tuple(range(-f.ndim, -1))), | 3D conv op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2159-L2164 |
riga/tfdeploy | tfdeploy.py | AvgPool | def AvgPool(a, k, strides, padding, data_format):
"""
Average pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.average(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool, | python | def AvgPool(a, k, strides, padding, data_format):
"""
Average pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.average(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool, | Average pooling op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2190-L2203 |
riga/tfdeploy | tfdeploy.py | MaxPool | def MaxPool(a, k, strides, padding, data_format):
"""
Maximum pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.amax(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool, | python | def MaxPool(a, k, strides, padding, data_format):
"""
Maximum pooling op.
"""
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.amax(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool, | Maximum pooling op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2207-L2220 |
riga/tfdeploy | tfdeploy.py | AvgPool3D | def AvgPool3D(a, k, strides, padding):
"""
Average 3D pooling op.
"""
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.average(patches, axis=tuple(range(-len(k), 0))), | python | def AvgPool3D(a, k, strides, padding):
"""
Average 3D pooling op.
"""
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.average(patches, axis=tuple(range(-len(k), 0))), | Average 3D pooling op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2224-L2229 |
riga/tfdeploy | tfdeploy.py | MaxPool3D | def MaxPool3D(a, k, strides, padding):
"""
Maximum 3D pooling op.
"""
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.amax(patches, axis=tuple(range(-len(k), 0))), | python | def MaxPool3D(a, k, strides, padding):
"""
Maximum 3D pooling op.
"""
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.amax(patches, axis=tuple(range(-len(k), 0))), | Maximum 3D pooling op. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2233-L2238 |
riga/tfdeploy | tfdeploy.py | Model.get | def get(self, *names, **kwargs):
""" get(*names, key=None)
Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within
the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None*
is returned when no tensor was found. In case a tensor is passed, its name is used for the
lookup.
"""
tensors = tuple(self._get(name, **kwargs) for name in names)
return tensors[0] if len(names) == 1 else tensors | python | def get(self, *names, **kwargs):
""" get(*names, key=None)
Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within
the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None*
is returned when no tensor was found. In case a tensor is passed, its name is used for the
lookup.
"""
tensors = tuple(self._get(name, **kwargs) for name in names)
return tensors[0] if len(names) == 1 else tensors | get(*names, key=None)
Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within
the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None*
is returned when no tensor was found. In case a tensor is passed, its name is used for the
lookup. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L115-L123 |
riga/tfdeploy | tfdeploy.py | Model.add | def add(self, tensor, tf_sess=None, key=None, **kwargs):
"""
Adds a new root *tensor* for a *key* which, if *None*, defaults to a consecutive number.
When *tensor* is not an instance of :py:class:`Tensor` but an instance of
``tensorflow.Tensor``, it is converted first. In that case, *tf_sess* should be a valid
tensorflow session and *kwargs* are forwarded to the :py:class:`Tensor` constructor.
"""
if not isinstance(tensor, Tensor):
tensor = Tensor(tensor, tf_sess, **kwargs)
if key is None:
if len(self.roots) == 0:
key = 0
else:
key = max(self.roots.keys()) + 1
self.roots[key] = tensor | python | def add(self, tensor, tf_sess=None, key=None, **kwargs):
"""
Adds a new root *tensor* for a *key* which, if *None*, defaults to a consecutive number.
When *tensor* is not an instance of :py:class:`Tensor` but an instance of
``tensorflow.Tensor``, it is converted first. In that case, *tf_sess* should be a valid
tensorflow session and *kwargs* are forwarded to the :py:class:`Tensor` constructor.
"""
if not isinstance(tensor, Tensor):
tensor = Tensor(tensor, tf_sess, **kwargs)
if key is None:
if len(self.roots) == 0:
key = 0
else:
key = max(self.roots.keys()) + 1
self.roots[key] = tensor | Adds a new root *tensor* for a *key* which, if *None*, defaults to a consecutive number.
When *tensor* is not an instance of :py:class:`Tensor` but an instance of
``tensorflow.Tensor``, it is converted first. In that case, *tf_sess* should be a valid
tensorflow session and *kwargs* are forwarded to the :py:class:`Tensor` constructor. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L145-L161 |
riga/tfdeploy | tfdeploy.py | Model.load | def load(self, path):
"""
Loads all tensors from a file defined by *path* and adds them to the root set.
"""
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "rb") as f:
roots = pickle.load(f)
for key, tensor in roots.items():
self.add(tensor, key=key) | python | def load(self, path):
"""
Loads all tensors from a file defined by *path* and adds them to the root set.
"""
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "rb") as f:
roots = pickle.load(f)
for key, tensor in roots.items():
self.add(tensor, key=key) | Loads all tensors from a file defined by *path* and adds them to the root set. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L163-L172 |
riga/tfdeploy | tfdeploy.py | Model.save | def save(self, path):
"""
Saves all tensors of the root set to a file defined by *path*.
"""
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "wb") as f:
pickle.dump(self.roots, f) | python | def save(self, path):
"""
Saves all tensors of the root set to a file defined by *path*.
"""
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "wb") as f:
pickle.dump(self.roots, f) | Saves all tensors of the root set to a file defined by *path*. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L174-L180 |
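Together, `add`, `load`, and `save` cover the convert, persist, and restore cycle for a model. Below is a minimal sketch of that cycle, not taken from the file itself: the toy graph, tensor names, and file name are placeholders, and it assumes `Model` also accepts a path to restore from, mirroring `load` above.

```python
import numpy as np
import tensorflow as tf   # TensorFlow 1.x style API
import tfdeploy as td

# Hypothetical toy graph: a single softmax layer whose output is named "output".
x = tf.placeholder(tf.float32, shape=[None, 10], name="input")
W = tf.Variable(tf.random_normal([10, 3]))
y = tf.nn.softmax(tf.matmul(x, W), name="output")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    model = td.Model()
    model.add(y, sess)          # converts the tf.Tensor graph into td.Tensor objects
    model.save("model.pkl")

# Later, in a NumPy-only environment (assumes Model(path) restores the pickled roots):
model = td.Model("model.pkl")
inp, outp = model.get("input", "output")
print(outp.eval({inp: np.random.rand(4, 10)}))
```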
riga/tfdeploy | tfdeploy.py | Ensemble.get | def get(self, *names, **kwargs):
""" get(*names, key=None)
Returns one or more :py:class:`TensorEnsemble` instances given by *names* using a deep
lookup within all read models. Each returned tensor ensemble will have ``len(models)``
tensors. If a model does not contain a specific tensor defined by a specific *name*, the
associated ensemble tensor will contain a *None* for that model in its tensors. If *key* is
not *None*, only the root tensors with that *key* are traversed.
"""
# create empty tensor ensembles with our method
tensor_ensembles = [TensorEnsemble([], self.method) for name in names]
# loop over models, collect and add tensors
for model in self.models:
tensors = model.get(*names, **kwargs)
if not isinstance(tensors, tuple):
tensors = (tensors,)
for i, t in enumerate(tensors if isinstance(tensors, tuple) else (tensors,)):
tensor_ensembles[i].tensors.append(t)
return tensor_ensembles[0] if len(names) == 1 else tuple(tensor_ensembles) | python | def get(self, *names, **kwargs):
""" get(*names, key=None)
Returns one or more :py:class:`TensorEnsemble` instances given by *names* using a deep
lookup within all read models. Each returned tensor ensemble will have ``len(models)``
tensors. If a model does not contain a specific tensor defined by a specific *name*, the
associated ensemble tensor will contain a *None* for that model in its tensors. If *key* is
not *None*, only the root tensors with that *key* are traversed.
"""
# create empty tensor ensembles with our method
tensor_ensembles = [TensorEnsemble([], self.method) for name in names]
# loop over models, collect and add tensors
for model in self.models:
tensors = model.get(*names, **kwargs)
if not isinstance(tensors, tuple):
tensors = (tensors,)
for i, t in enumerate(tensors if isinstance(tensors, tuple) else (tensors,)):
tensor_ensembles[i].tensors.append(t)
return tensor_ensembles[0] if len(names) == 1 else tuple(tensor_ensembles) | get(*names, key=None)
Returns one or more :py:class:`TensorEnsemble` instances given by *names* using a deep
lookup within all read models. Each returned tensor ensemble will have ``len(models)``
tensors. If a model does not contain a specific tensor defined by a specific *name*, the
associated ensemble tensor will contain a *None* for that model in its tensors. If *key* is
not *None*, only the root tensors with that *key* are traversed. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L620-L639 |
riga/tfdeploy | tfdeploy.py | Ensemble.load | def load(self, paths):
"""
Loads models from a list of *paths*.
"""
for path in paths:
self.models.append(Model(path)) | python | def load(self, paths):
"""
Loads models from a list of *paths*.
"""
for path in paths:
self.models.append(Model(path)) | Loads models from a list of *paths*. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L641-L646 |
riga/tfdeploy | tfdeploy.py | TensorEnsemble.eval | def eval(self, feed_dict=None):
"""
Evaluates all contained tensors using a *feed_dict* and returns the ensemble value. The keys
of *feed_dict* must be tensor ensembles. Its values can be batches, i.e., numpy arrays, or
lists or tuples of batches. In the latter case, these lists or tuples must have the same
length as the list of stored tensors as they will be mapped.
"""
# first, check that the length of all feed_dict keys match our own length
for tensor_ensemble in feed_dict:
if len(tensor_ensemble.tensors) != len(self.tensors):
raise EnsembleMismatchException("incompatible lengths of tensors: %d, %d" \
% (len(self.tensors), len(tensor_ensemble.tensors)))
# create a joined uuid
_uuid = uuid4()
# prepare feed_dicts
feed_dicts = [{} for _ in range(len(self.tensors))]
for tensor_ensemble, value in feed_dict.items():
for i, tensor in enumerate(tensor_ensemble.tensors):
if tensor is not None:
feed_dicts[i][tensor] = value[i] if isinstance(value, (list, tuple)) else value
# eval all tensors
values = [t.eval(feed_dict=d, _uuid=_uuid) for t, d in zip(self.tensors, feed_dicts)]
# return the computed ensemble value
return self.func(values) | python | def eval(self, feed_dict=None):
"""
Evaluates all contained tensors using a *feed_dict* and returns the ensemble value. The keys
of *feed_dict* must be tensor ensembles. Its values can be batches, i.e., numpy arrays, or
lists or tuples of batches. In the latter case, these lists or tuples must have the same
length as the list of stored tensors as they will be mapped.
"""
# first, check that the length of all feed_dict keys match our own length
for tensor_ensemble in feed_dict:
if len(tensor_ensemble.tensors) != len(self.tensors):
raise EnsembleMismatchException("incompatible lengths of tensors: %d, %d" \
% (len(self.tensors), len(tensor_ensemble.tensors)))
# create a joined uuid
_uuid = uuid4()
# prepare feed_dicts
feed_dicts = [{} for _ in range(len(self.tensors))]
for tensor_ensemble, value in feed_dict.items():
for i, tensor in enumerate(tensor_ensemble.tensors):
if tensor is not None:
feed_dicts[i][tensor] = value[i] if isinstance(value, (list, tuple)) else value
# eval all tensors
values = [t.eval(feed_dict=d, _uuid=_uuid) for t, d in zip(self.tensors, feed_dicts)]
# return the computed ensemble value
return self.func(values) | Evaluates all contained tensors using a *feed_dict* and returns the ensemble value. The keys
of *feed_dict* must be tensor ensembles. Its values can be batches, i.e., numpy arrays, or
lists or tuples of batches. In the latter case, these lists or tuples must have the same
length as the list of stored tensors as they will be mapped. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L673-L700 |
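The docstring above describes how one `feed_dict` fans out to the individual models. Here is a short sketch of that usage; it assumes two model files written by `Model.save` and that the `Ensemble` constructor takes a list of paths plus one of the `METHOD_*` constants referenced in `TensorEnsemble.func` further down. Tensor and file names are placeholders.

```python
import numpy as np
import tfdeploy as td

# Placeholder model files, each containing tensors named "input" and "output".
ensemble = td.Ensemble(["model1.pkl", "model2.pkl"], method=td.METHOD_MEAN)
inp, outp = ensemble.get("input", "output")   # TensorEnsemble objects

batch = np.random.rand(4, 10)

# One batch fed to every model, or one batch per model (mapped by position).
mean_single = outp.eval({inp: batch})
mean_mapped = outp.eval({inp: [batch, batch]})
```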
riga/tfdeploy | tfdeploy.py | TensorEnsemble.func | def func(self, values):
"""
The actual ensembling logic that combines multiple *values*. The method call is forwarded
to the ensemble method-specific variant which is determined using *method*.
"""
if self.method == METHOD_MEAN:
return self.func_mean(values)
elif self.method == METHOD_MAX:
return self.func_max(values)
elif self.method == METHOD_MIN:
return self.func_min(values)
elif self.method == METHOD_CUSTOM:
return self.func_custom(values)
else:
raise UnknownEnsembleMethodException(self.method) | python | def func(self, values):
"""
The actual ensembling logic that combines multiple *values*. The method call is forwarded
to the ensemble method-specific variant which is determined using *method*.
"""
if self.method == METHOD_MEAN:
return self.func_mean(values)
elif self.method == METHOD_MAX:
return self.func_max(values)
elif self.method == METHOD_MIN:
return self.func_min(values)
elif self.method == METHOD_CUSTOM:
return self.func_custom(values)
else:
raise UnknownEnsembleMethodException(self.method) | The actual ensembling logic that combines multiple *values*. The method call is forwarded
to the ensemble method-specific variant which is determined using *method*. | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L705-L719 |
adafruit/Adafruit_CircuitPython_MatrixKeypad | adafruit_matrixkeypad.py | Matrix_Keypad.pressed_keys | def pressed_keys(self):
"""An array containing all detected keys that are pressed from the initalized
list-of-lists passed in during creation"""
# make a list of all the keys that are detected
pressed = []
# set all pins to be inputs w/pullups
for pin in self.row_pins+self.col_pins:
pin.direction = Direction.INPUT
pin.pull = Pull.UP
for row in range(len(self.row_pins)):
# set one row low at a time
self.row_pins[row].direction = Direction.OUTPUT
self.row_pins[row].value = False
# check the column pins, which ones are pulled down
for col in range(len(self.col_pins)):
if not self.col_pins[col].value:
pressed.append(self.keys[row][col])
# reset the pin to be an input
self.row_pins[row].direction = Direction.INPUT
self.row_pins[row].pull = Pull.UP
return pressed | python | def pressed_keys(self):
"""An array containing all detected keys that are pressed from the initalized
list-of-lists passed in during creation"""
# make a list of all the keys that are detected
pressed = []
# set all pins to be inputs w/pullups
for pin in self.row_pins+self.col_pins:
pin.direction = Direction.INPUT
pin.pull = Pull.UP
for row in range(len(self.row_pins)):
# set one row low at a time
self.row_pins[row].direction = Direction.OUTPUT
self.row_pins[row].value = False
# check the column pins, which ones are pulled down
for col in range(len(self.col_pins)):
if not self.col_pins[col].value:
pressed.append(self.keys[row][col])
# reset the pin to be an input
self.row_pins[row].direction = Direction.INPUT
self.row_pins[row].pull = Pull.UP
return pressed | An array containing all detected keys that are pressed from the initialized
list-of-lists passed in during creation | https://github.com/adafruit/Adafruit_CircuitPython_MatrixKeypad/blob/f530b1a920a40ef09ec1394b7760f243a243045a/adafruit_matrixkeypad.py#L69-L91 |
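The scan above is usually polled in a loop. A minimal CircuitPython sketch of that pattern follows; the pin choices and key layout are placeholders for the actual wiring of the keypad.

```python
import time
import board
import digitalio
import adafruit_matrixkeypad

# Placeholder wiring for a 3x4 keypad: swap in the real pins for your board.
cols = [digitalio.DigitalInOut(p) for p in (board.D3, board.D4, board.D5)]
rows = [digitalio.DigitalInOut(p) for p in (board.D6, board.D7, board.D8, board.D9)]
keys = ((1, 2, 3),
        (4, 5, 6),
        (7, 8, 9),
        ("*", 0, "#"))

keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)

while True:
    pressed = keypad.pressed_keys   # drives one row low at a time and reads the columns
    if pressed:
        print("Pressed:", pressed)
    time.sleep(0.1)
```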
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.get_load_times | def get_load_times(self, asset_type):
"""
Just a ``list`` of the load times of a certain asset type for each page
:param asset_type: ``str`` of the asset type to return load times for
"""
load_times = []
search_str = '{0}_load_time'.format(asset_type)
for har_page in self.pages:
val = getattr(har_page, search_str, None)
if val is not None:
load_times.append(val)
return load_times | python | def get_load_times(self, asset_type):
"""
Just a ``list`` of the load times of a certain asset type for each page
:param asset_type: ``str`` of the asset type to return load times for
"""
load_times = []
search_str = '{0}_load_time'.format(asset_type)
for har_page in self.pages:
val = getattr(har_page, search_str, None)
if val is not None:
load_times.append(val)
return load_times | Just a ``list`` of the load times of a certain asset type for each page
:param asset_type: ``str`` of the asset type to return load times for | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L41-L53 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.get_stdev | def get_stdev(self, asset_type):
"""
Returns the standard deviation for a set of a certain asset type.
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: An ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision
"""
load_times = []
# Handle edge cases like TTFB
if asset_type == 'ttfb':
for page in self.pages:
if page.time_to_first_byte is not None:
load_times.append(page.time_to_first_byte)
elif asset_type not in self.asset_types and asset_type != 'page':
raise ValueError('asset_type must be one of:\nttfb\n{0}'.format(
'\n'.join(self.asset_types)))
else:
load_times = self.get_load_times(asset_type)
if not load_times or not sum(load_times):
return 0
return round(stdev(load_times),
self.decimal_precision) | python | def get_stdev(self, asset_type):
"""
Returns the standard deviation for a set of a certain asset type.
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: An ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision
"""
load_times = []
# Handle edge cases like TTFB
if asset_type == 'ttfb':
for page in self.pages:
if page.time_to_first_byte is not None:
load_times.append(page.time_to_first_byte)
elif asset_type not in self.asset_types and asset_type != 'page':
raise ValueError('asset_type must be one of:\nttfb\n{0}'.format(
'\n'.join(self.asset_types)))
else:
load_times = self.get_load_times(asset_type)
if not load_times or not sum(load_times):
return 0
return round(stdev(load_times),
self.decimal_precision) | Returns the standard deviation for a set of a certain asset type.
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: An ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L55-L79 |
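A short sketch of how the aggregate properties and `get_stdev` above are typically combined; it assumes several HAR captures of the same page saved to disk, that `MultiHarParser` is exposed at the package top level, and that its constructor accepts the list of HAR dicts as `har_data`. File names are placeholders.

```python
import json
from haralyzer import MultiHarParser

# Placeholder capture files: several runs of the same page.
har_dicts = []
for path in ("run1.har", "run2.har", "run3.har"):
    with open(path) as f:
        har_dicts.append(json.load(f))

multi = MultiHarParser(har_data=har_dicts)

print(multi.page_load_time)     # mean onLoad time across runs, in ms
print(multi.js_load_time)       # mean JavaScript load time across runs
print(multi.get_stdev('js'))    # spread of the JavaScript load times
print(multi.get_stdev('ttfb'))  # spread of time to first byte
```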
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.pages | def pages(self):
"""
The aggregate pages of all the parser objects.
"""
pages = []
for har_dict in self.har_data:
har_parser = HarParser(har_data=har_dict)
if self.page_id:
for page in har_parser.pages:
if page.page_id == self.page_id:
pages.append(page)
else:
pages = pages + har_parser.pages
return pages | python | def pages(self):
"""
The aggregate pages of all the parser objects.
"""
pages = []
for har_dict in self.har_data:
har_parser = HarParser(har_data=har_dict)
if self.page_id:
for page in har_parser.pages:
if page.page_id == self.page_id:
pages.append(page)
else:
pages = pages + har_parser.pages
return pages | The aggregate pages of all the parser objects. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L82-L95 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.time_to_first_byte | def time_to_first_byte(self):
"""
The aggregate time to first byte for all pages.
"""
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
ttfb.append(page.time_to_first_byte)
return round(mean(ttfb), self.decimal_precision) | python | def time_to_first_byte(self):
"""
The aggregate time to first byte for all pages.
"""
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
ttfb.append(page.time_to_first_byte)
return round(mean(ttfb), self.decimal_precision) | The aggregate time to first byte for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L105-L113 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.page_load_time | def page_load_time(self):
"""
The average total load time for all runs (not weighted).
"""
load_times = self.get_load_times('page')
return round(mean(load_times), self.decimal_precision) | python | def page_load_time(self):
"""
The average total load time for all runs (not weighted).
"""
load_times = self.get_load_times('page')
return round(mean(load_times), self.decimal_precision) | The average total load time for all runs (not weighted). | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L116-L121 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.js_load_time | def js_load_time(self):
"""
Returns aggregate javascript load time.
"""
load_times = self.get_load_times('js')
return round(mean(load_times), self.decimal_precision) | python | def js_load_time(self):
"""
Returns aggregate javascript load time.
"""
load_times = self.get_load_times('js')
return round(mean(load_times), self.decimal_precision) | Returns aggregate javascript load time. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L124-L129 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.css_load_time | def css_load_time(self):
"""
Returns aggregate css load time for all pages.
"""
load_times = self.get_load_times('css')
return round(mean(load_times), self.decimal_precision) | python | def css_load_time(self):
"""
Returns aggregate css load time for all pages.
"""
load_times = self.get_load_times('css')
return round(mean(load_times), self.decimal_precision) | Returns aggregate css load time for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L132-L137 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.image_load_time | def image_load_time(self):
"""
Returns aggregate image load time for all pages.
"""
load_times = self.get_load_times('image')
return round(mean(load_times), self.decimal_precision) | python | def image_load_time(self):
"""
Returns aggregate image load time for all pages.
"""
load_times = self.get_load_times('image')
return round(mean(load_times), self.decimal_precision) | Returns aggregate image load time for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L140-L145 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.html_load_time | def html_load_time(self):
"""
Returns aggregate html load time for all pages.
"""
load_times = self.get_load_times('html')
return round(mean(load_times), self.decimal_precision) | python | def html_load_time(self):
"""
Returns aggregate html load time for all pages.
"""
load_times = self.get_load_times('html')
return round(mean(load_times), self.decimal_precision) | Returns aggregate html load time for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L148-L153 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.audio_load_time | def audio_load_time(self):
"""
Returns aggregate audio load time for all pages.
"""
load_times = self.get_load_times('audio')
return round(mean(load_times), self.decimal_precision) | python | def audio_load_time(self):
"""
Returns aggregate audio load time for all pages.
"""
load_times = self.get_load_times('audio')
return round(mean(load_times), self.decimal_precision) | Returns aggregate audio load time for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L156-L161 |
mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.video_load_time | def video_load_time(self):
"""
Returns aggregate video load time for all pages.
"""
load_times = self.get_load_times('video')
return round(mean(load_times), self.decimal_precision) | python | def video_load_time(self):
"""
Returns aggregate video load time for all pages.
"""
load_times = self.get_load_times('video')
return round(mean(load_times), self.decimal_precision) | Returns aggregate video load time for all pages. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L164-L169 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_headers | def match_headers(self, entry, header_type, header, value, regex=True):
"""
Function to match headers.
Since the output of headers might use different case, like:
'content-type' vs 'Content-Type'
This function is case-insensitive
:param entry: entry object
:param header_type: ``str`` of header type. Valid values:
* 'request'
* 'response'
:param header: ``str`` of the header to search for
:param value: ``str`` of value to search for
:param regex: ``bool`` indicating whether to use regex or exact match
:returns: a ``bool`` indicating whether a match was found
"""
if header_type not in entry:
raise ValueError('Invalid header_type, should be either:\n\n'
'* \'request\'\n*\'response\'')
# TODO - headers are empty in some HAR data.... need fallbacks here
for h in entry[header_type]['headers']:
if h['name'].lower() == header.lower() and h['value'] is not None:
if regex and re.search(value, h['value'], flags=re.IGNORECASE):
return True
elif value == h['value']:
return True
return False | python | def match_headers(self, entry, header_type, header, value, regex=True):
"""
Function to match headers.
Since the output of headers might use different case, like:
'content-type' vs 'Content-Type'
This function is case-insensitive
:param entry: entry object
:param header_type: ``str`` of header type. Valid values:
* 'request'
* 'response'
:param header: ``str`` of the header to search for
:param value: ``str`` of value to search for
:param regex: ``bool`` indicating whether to use regex or exact match
:returns: a ``bool`` indicating whether a match was found
"""
if header_type not in entry:
raise ValueError('Invalid header_type, should be either:\n\n'
'* \'request\'\n*\'response\'')
# TODO - headers are empty in some HAR data.... need fallbacks here
for h in entry[header_type]['headers']:
if h['name'].lower() == header.lower() and h['value'] is not None:
if regex and re.search(value, h['value'], flags=re.IGNORECASE):
return True
elif value == h['value']:
return True
return False | Function to match headers.
Since the output of headers might use different case, like:
'content-type' vs 'Content-Type'
This function is case-insensitive
:param entry: entry object
:param header_type: ``str`` of header type. Valid values:
* 'request'
* 'response'
:param header: ``str`` of the header to search for
:param value: ``str`` of value to search for
:param regex: ``bool`` indicating whether to use regex or exact match
:returns: a ``bool`` indicating whether a match was found | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L40-L73 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_content_type | def match_content_type(entry, content_type, regex=True):
"""
Matches the content type of a request using the mimeType metadata.
:param entry: ``dict`` of a single entry from a HarPage
:param content_type: ``str`` of regex to use for finding content type
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
mimeType = entry['response']['content']['mimeType']
if regex and re.search(content_type, mimeType, flags=re.IGNORECASE):
return True
elif content_type == mimeType:
return True
return False | python | def match_content_type(entry, content_type, regex=True):
"""
Matches the content type of a request using the mimeType metadata.
:param entry: ``dict`` of a single entry from a HarPage
:param content_type: ``str`` of regex to use for finding content type
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
mimeType = entry['response']['content']['mimeType']
if regex and re.search(content_type, mimeType, flags=re.IGNORECASE):
return True
elif content_type == mimeType:
return True
return False | Matches the content type of a request using the mimeType metadata.
:param entry: ``dict`` of a single entry from a HarPage
:param content_type: ``str`` of regex to use for finding content type
:param regex: ``bool`` indicating whether to use regex or exact match. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L76-L92 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_request_type | def match_request_type(self, entry, request_type, regex=True):
"""
Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param request_type: ``str`` of request type to match
:param regex: ``bool`` indicating whether to use a regex or string match
"""
if regex:
return re.search(request_type, entry['request']['method'],
flags=re.IGNORECASE) is not None
else:
return entry['request']['method'] == request_type | python | def match_request_type(self, entry, request_type, regex=True):
"""
Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param request_type: ``str`` of request type to match
:param regex: ``bool`` indicating whether to use a regex or string match
"""
if regex:
return re.search(request_type, entry['request']['method'],
flags=re.IGNORECASE) is not None
else:
return entry['request']['method'] == request_type | Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param request_type: ``str`` of request type to match
:param regex: ``bool`` indicating whether to use a regex or string match | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L94-L107 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_http_version | def match_http_version(entry, http_version, regex=True):
"""
Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``bool`` indicating whether to use a regex or string match
"""
response_version = entry['response']['httpVersion']
if regex:
return re.search(http_version, response_version,
flags=re.IGNORECASE) is not None
else:
return response_version == http_version | python | def match_http_version(entry, http_version, regex=True):
"""
Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``bool`` indicating whether to use a regex or string match
"""
response_version = entry['response']['httpVersion']
if regex:
return re.search(http_version, response_version,
flags=re.IGNORECASE) is not None
else:
return response_version == http_version | Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``bool`` indicating whether to use a regex or string match | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L110-L124 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_status_code | def match_status_code(self, entry, status_code, regex=True):
"""
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param regex: ``bool`` indicating whether to use a regex or string match
"""
if regex:
return re.search(status_code,
str(entry['response']['status'])) is not None
else:
return str(entry['response']['status']) == status_code | python | def match_status_code(self, entry, status_code, regex=True):
"""
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param regex: ``bool`` indicating whether to use a regex or string match
"""
if regex:
return re.search(status_code,
str(entry['response']['status'])) is not None
else:
return str(entry['response']['status']) == status_code | Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param regex: ``bool`` indicating whether to use a regex or string match | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L126-L141 |
mrname/haralyzer | haralyzer/assets.py | HarParser.create_asset_timeline | def create_asset_timeline(self, asset_list):
"""
Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were loading at that time.
:param asset_list: ``list`` of the assets to create a timeline for.
"""
results = dict()
for asset in asset_list:
time_key = dateutil.parser.parse(asset['startedDateTime'])
load_time = int(asset['time'])
# Add the start time and asset to the results dict
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
# For each millisecond the asset was loading, insert the asset
# into the appropriate key of the results dict. Starting the range()
# index at 1 because we already inserted the first millisecond.
for _ in range(1, load_time):
time_key = time_key + datetime.timedelta(milliseconds=1)
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
return results | python | def create_asset_timeline(self, asset_list):
"""
Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were loading at that time.
:param asset_list: ``list`` of the assets to create a timeline for.
"""
results = dict()
for asset in asset_list:
time_key = dateutil.parser.parse(asset['startedDateTime'])
load_time = int(asset['time'])
# Add the start time and asset to the results dict
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
# For each millisecond the asset was loading, insert the asset
# into the appropriate key of the results dict. Starting the range()
# index at 1 because we already inserted the first millisecond.
for _ in range(1, load_time):
time_key = time_key + datetime.timedelta(milliseconds=1)
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
return results | Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were loading at that time.
:param asset_list: ``list`` of the assets to create a timeline for. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L143-L171 |
mrname/haralyzer | haralyzer/assets.py | HarParser.pages | def pages(self):
"""
This is a list of HarPage objects, each of which represents a page
from the HAR file.
"""
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for entry in self.har_data['entries']):
pages.append(HarPage('unknown', har_parser=self))
for har_page in self.har_data['pages']:
page = HarPage(har_page['id'], har_parser=self)
pages.append(page)
return pages | python | def pages(self):
"""
This is a list of HarPage objects, each of which represents a page
from the HAR file.
"""
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for entry in self.har_data['entries']):
pages.append(HarPage('unknown', har_parser=self))
for har_page in self.har_data['pages']:
page = HarPage(har_page['id'], har_parser=self)
pages.append(page)
return pages | This is a list of HarPage objects, each of which represents a page
from the HAR file. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L174-L188 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_size_trans | def _get_asset_size_trans(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size_trans(assets) | python | def _get_asset_size_trans(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size_trans(assets) | Helper function to dynamically create *_size properties. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L266-L274 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_size | def _get_asset_size(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size(assets) | python | def _get_asset_size(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size(assets) | Helper function to dynamically create *_size properties. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L276-L284 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_load | def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['onContentLoad']
elif asset_type == 'page':
if self.page_id == 'unknown':
return None
return self.pageTimings['onLoad']
# TODO - should we return a slightly fake total load time to
        # accommodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
else:
return self.get_load_time(
content_type=self.asset_types[asset_type]
) | python | def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['onContentLoad']
elif asset_type == 'page':
if self.page_id == 'unknown':
return None
return self.pageTimings['onLoad']
# TODO - should we return a slightly fake total load time to
        # accommodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
else:
return self.get_load_time(
content_type=self.asset_types[asset_type]
) | Helper function to dynamically create *_load_time properties. Return
value is in ms. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L286-L306 |
mrname/haralyzer | haralyzer/assets.py | HarPage.filter_entries | def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True):
"""
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
results = []
for entry in self.entries:
"""
So yea... this is a bit ugly. We are looking for:
* The request type using self._match_request_type()
* The content type using self._match_headers()
* The HTTP response status code using self._match_status_code()
* The HTTP version using self._match_headers()
Oh lords of python.... please forgive my soul
"""
valid_entry = True
p = self.parser
if request_type is not None and not p.match_request_type(
entry, request_type, regex=regex):
valid_entry = False
if content_type is not None:
if not self.parser.match_content_type(entry, content_type,
regex=regex):
valid_entry = False
if status_code is not None and not p.match_status_code(
entry, status_code, regex=regex):
valid_entry = False
if http_version is not None and not p.match_http_version(
entry, http_version, regex=regex):
valid_entry = False
if valid_entry:
results.append(entry)
return results | python | def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True):
"""
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match.
"""
results = []
for entry in self.entries:
"""
So yea... this is a bit ugly. We are looking for:
* The request type using self._match_request_type()
* The content type using self._match_headers()
* The HTTP response status code using self._match_status_code()
* The HTTP version using self._match_headers()
Oh lords of python.... please forgive my soul
"""
valid_entry = True
p = self.parser
if request_type is not None and not p.match_request_type(
entry, request_type, regex=regex):
valid_entry = False
if content_type is not None:
if not self.parser.match_content_type(entry, content_type,
regex=regex):
valid_entry = False
if status_code is not None and not p.match_status_code(
entry, status_code, regex=regex):
valid_entry = False
if http_version is not None and not p.match_http_version(
entry, http_version, regex=regex):
valid_entry = False
if valid_entry:
results.append(entry)
return results | Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L308-L351 |
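A standalone sketch of the same filtering pattern (regex or exact match against request method, response MIME type and status code) on hand-rolled entry dicts; the `matches` helper is a simplified stand-in for the parser's `match_*` methods, not the library's actual implementation:

```python
import re

entries = [
    {"request": {"method": "GET"},
     "response": {"status": 200, "content": {"mimeType": "text/html"}}},
    {"request": {"method": "POST"},
     "response": {"status": 404, "content": {"mimeType": "application/json"}}},
]

def matches(value, pattern, regex=True):
    value = str(value)
    return bool(re.search(pattern, value, flags=re.IGNORECASE)) if regex else value == pattern

def filter_entries(entries, request_type=None, content_type=None, status_code=None, regex=True):
    results = []
    for entry in entries:
        if request_type is not None and not matches(entry["request"]["method"], request_type, regex):
            continue
        if content_type is not None and not matches(entry["response"]["content"]["mimeType"], content_type, regex):
            continue
        if status_code is not None and not matches(entry["response"]["status"], status_code, regex):
            continue
        results.append(entry)
    return results

print(len(filter_entries(entries, request_type="GET")))    # 1
print(len(filter_entries(entries, content_type="json")))   # 1
print(len(filter_entries(entries, status_code="2.*")))     # 1
```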
mrname/haralyzer | haralyzer/assets.py | HarPage.get_load_time | def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
"""
entries = self.filter_entries(
request_type=request_type, content_type=content_type,
status_code=status_code
)
if "async" in kwargs:
asynchronous = kwargs['async']
if not asynchronous:
time = 0
for entry in entries:
time += entry['time']
return time
else:
return len(self.parser.create_asset_timeline(entries)) | python | def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
"""
entries = self.filter_entries(
request_type=request_type, content_type=content_type,
status_code=status_code
)
if "async" in kwargs:
asynchronous = kwargs['async']
if not asynchronous:
time = 0
for entry in entries:
time += entry['time']
return time
else:
return len(self.parser.create_asset_timeline(entries)) | This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4) | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L353-L384 |
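The difference between the two modes can be reproduced without the library: summing per-entry times gives the sequential total, while counting the distinct milliseconds during which at least one entry was in flight gives the overlap-aware ("actual") time. The numbers below are invented:

```python
# (start offset in ms, duration in ms) for two images downloaded in parallel
entries = [(0, 2000), (0, 2000)]

# asynchronous=False: plain sum of individual download times
sequential = sum(duration for _, duration in entries)        # 4000 ms

# asynchronous=True: count distinct milliseconds with at least one active download
busy_ms = {ms for offset, duration in entries for ms in range(offset, offset + duration)}
actual = len(busy_ms)                                         # 2000 ms

print(sequential, actual)
```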
mrname/haralyzer | haralyzer/assets.py | HarPage.get_total_size | def get_total_size(self, entries):
"""
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['bodySize'] > 0:
size += entry['response']['bodySize']
return size | python | def get_total_size(self, entries):
"""
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['bodySize'] > 0:
size += entry['response']['bodySize']
return size | Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L386-L396 |
mrname/haralyzer | haralyzer/assets.py | HarPage.get_total_size_trans | def get_total_size_trans(self, entries):
"""
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['_transferSize'] > 0:
size += entry['response']['_transferSize']
return size | python | def get_total_size_trans(self, entries):
"""
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['_transferSize'] > 0:
size += entry['response']['_transferSize']
return size | Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of. | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L398-L410 |
mrname/haralyzer | haralyzer/assets.py | HarPage.time_to_first_byte | def time_to_first_byte(self):
"""
Time to first byte of the page request in ms
"""
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for entry in self.entries:
if entry['response']['status'] == 200:
for k, v in iteritems(entry['timings']):
if k != 'receive':
if v > 0:
ttfb += v
break
else:
ttfb += entry['time']
return ttfb | python | def time_to_first_byte(self):
"""
Time to first byte of the page request in ms
"""
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for entry in self.entries:
if entry['response']['status'] == 200:
for k, v in iteritems(entry['timings']):
if k != 'receive':
if v > 0:
ttfb += v
break
else:
ttfb += entry['time']
return ttfb | Time to first byte of the page request in ms | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L449-L468 |
kako-nawao/django-group-by | django_group_by/queryset.py | GroupByQuerySetMixinBase.group_by | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
return self._clone(klass=GroupByQuerySet, setup=True, _fields=fields) | python | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
return self._clone(klass=GroupByQuerySet, setup=True, _fields=fields) | Clone the queryset using GroupByQuerySet.
:param fields:
:return: | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/queryset.py#L34-L42 |
kako-nawao/django-group-by | django_group_by/group.py | AggregatedGroup._data | def _data(self):
"""
Cached data built from instance raw _values as a dictionary.
"""
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depending case
if len(attrs) == 2:
# Related model field, store nested
fk, fn = attrs
if fk not in d:
d[fk] = {}
d[fk][fn] = v
else:
# Own model field, store directly
d[k] = v
# Return (+cache) data
return d | python | def _data(self):
"""
Cached data built from instance raw _values as a dictionary.
"""
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depending case
if len(attrs) == 2:
# Related model field, store nested
fk, fn = attrs
if fk not in d:
d[fk] = {}
d[fk][fn] = v
else:
# Own model field, store directly
d[k] = v
# Return (+cache) data
return d | Cached data built from instance raw _values as a dictionary. | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L19-L43 |
kako-nawao/django-group-by | django_group_by/group.py | AggregatedGroup._set_values | def _set_values(self):
"""
Populate instance with given.
"""
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
# Get related model from field (follow path)
rel_model = self._model
for attr in k.split('__'):
rel_model = getattr(rel_model, attr).field.related_model
except AttributeError:
# Not a model, maybe it is a dict field (?)
pass
else:
# Model, first shorten field name
k = k.replace('__', '_')
# Now init instance if required (not if we got ID None)
if 'id' in v and v['id'] is None:
# This means we grouped by ID, if it's none then FK is None
v = None
else:
# Either we have ID or we didn't group by ID, use instance
v = rel_model(**v)
# Set value
setattr(self, k, v) | python | def _set_values(self):
"""
Populate instance with given.
"""
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
# Get related model from field (follow path)
rel_model = self._model
for attr in k.split('__'):
rel_model = getattr(rel_model, attr).field.related_model
except AttributeError:
# Not a model, maybe it is a dict field (?)
pass
else:
# Model, first shorten field name
k = k.replace('__', '_')
# Now init instance if required (not if we got ID None)
if 'id' in v and v['id'] is None:
# This means we grouped by ID, if it's none then FK is None
v = None
else:
# Either we have ID or we didn't group by ID, use instance
v = rel_model(**v)
# Set value
setattr(self, k, v) | Populate instance with given. | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L45-L77 |
kako-nawao/django-group-by | django_group_by/iterable.py | GroupByIterableMixinBase.group_by | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
clone = self._values(*fields)
clone._iterable_class = GroupByIterable
return clone | python | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
clone = self._values(*fields)
clone._iterable_class = GroupByIterable
return clone | Clone the queryset using GroupByQuerySet.
:param fields:
:return: | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/iterable.py#L37-L47 |
kako-nawao/django-group-by | django_group_by/mixin.py | GroupByMixin._expand_group_by_fields | def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res = []
related = {}
# Add own fields and populate related fields
for field_name in fields:
if '__' in field_name:
# Related model field: append to related model's fields
fk_field_name, related_field = field_name.split('__', 1)
if fk_field_name not in related:
related[fk_field_name] = [related_field]
else:
related[fk_field_name].append(related_field)
else:
# Simple field, get the field instance
model_field = model._meta.get_field(field_name)
if isinstance(model_field, (ForeignKey, ManyToManyField)):
# It's a related field, get model
related_model = model_field.related_model
# Append all its fields with the correct prefix
res.extend('{}__{}'.format(field_name, f.column)
for f in related_model._meta.fields)
else:
# It's a common field, just append it
res.append(field_name)
# Resolve all related fields
for fk_field_name, field_names in related.items():
# Get field
fk = model._meta.get_field(fk_field_name)
# Get all fields for that related model
related_fields = cls._expand_group_by_fields(fk.related_model,
field_names)
# Append them with the correct prefix
res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields)
# Return all fields
return res | python | def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res = []
related = {}
# Add own fields and populate related fields
for field_name in fields:
if '__' in field_name:
# Related model field: append to related model's fields
fk_field_name, related_field = field_name.split('__', 1)
if fk_field_name not in related:
related[fk_field_name] = [related_field]
else:
related[fk_field_name].append(related_field)
else:
# Simple field, get the field instance
model_field = model._meta.get_field(field_name)
if isinstance(model_field, (ForeignKey, ManyToManyField)):
# It's a related field, get model
related_model = model_field.related_model
# Append all its fields with the correct prefix
res.extend('{}__{}'.format(field_name, f.column)
for f in related_model._meta.fields)
else:
# It's a common field, just append it
res.append(field_name)
# Resolve all related fields
for fk_field_name, field_names in related.items():
# Get field
fk = model._meta.get_field(fk_field_name)
# Get all fields for that related model
related_fields = cls._expand_group_by_fields(fk.related_model,
field_names)
# Append them with the correct prefix
res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields)
# Return all fields
return res | Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/mixin.py#L23-L74 |
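A Django-free sketch of the same expansion idea: a foreign-key field name is replaced by `fk__column` names so later attribute access needs no extra lookups. The model layout is described by a hand-written mapping, and the recursive handling of nested `__` paths is omitted for brevity:

```python
# field name -> list of columns, listed only for "related" (FK-like) fields
RELATIONS = {
    "author": ["id", "name", "birthdate"],
    "publisher": ["id", "name"],
}

def expand_group_by_fields(fields):
    expanded = []
    for field in fields:
        if field in RELATIONS:
            expanded.extend("{}__{}".format(field, col) for col in RELATIONS[field])
        else:
            expanded.append(field)
    return expanded

print(expand_group_by_fields(["title", "author"]))
# ['title', 'author__id', 'author__name', 'author__birthdate']
```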
SiLab-Bonn/basil | basil/TL/Dummy.py | Dummy.write | def write(self, addr, data):
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
'''
logger.debug(
"Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data))
for curr_addr, d in enumerate(data, start=addr):
self.mem[curr_addr] = array.array('B', [d])[0] | python | def write(self, addr, data):
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
'''
logger.debug(
"Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data))
for curr_addr, d in enumerate(data, start=addr):
self.mem[curr_addr] = array.array('B', [d])[0] | Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Dummy.py#L36-L53 |
SiLab-Bonn/basil | basil/TL/Dummy.py | Dummy.read | def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
'''
logger.debug("Dummy SiTransferLayer.read addr: %s size: %s" % (hex(addr), size))
return array.array('B', [self.mem[curr_addr] if curr_addr in self.mem else 0 for curr_addr in range(addr, addr + size)]) | python | def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
'''
logger.debug("Dummy SiTransferLayer.read addr: %s size: %s" % (hex(addr), size))
return array.array('B', [self.mem[curr_addr] if curr_addr in self.mem else 0 for curr_addr in range(addr, addr + size)]) | Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Dummy.py#L55-L70 |
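The dummy transfer layer is essentially a sparse byte store: writes fill a dict keyed by address, reads return 0 for untouched addresses. A standalone sketch of that behaviour:

```python
import array

mem = {}

def write(addr, data):
    # store each byte at its own address
    for curr_addr, d in enumerate(data, start=addr):
        mem[curr_addr] = d & 0xFF

def read(addr, size):
    # unwritten addresses read back as 0
    return array.array('B', [mem.get(curr_addr, 0) for curr_addr in range(addr, addr + size)])

write(0x10, [1, 2, 3])
print(read(0x0E, 6))  # array('B', [0, 0, 1, 2, 3, 0])
```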
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.set_value | def set_value(self, value, addr, size, offset, **kwargs):
'''Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit size/length of the value to be written to the register.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
nothing
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
if mod_offset == 0 and mod_size == 0:
reg = BitLogic.from_value(0, size=div_size * 8)
else:
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
reg[size + mod_offset - 1:mod_offset] = value
self._intf.write(self._base_addr + addr + div_offset, data=array.array('B', reg.tobytes())) | python | def set_value(self, value, addr, size, offset, **kwargs):
'''Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit size/length of the value to be written to the register.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
nothing
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
if mod_offset == 0 and mod_size == 0:
reg = BitLogic.from_value(0, size=div_size * 8)
else:
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
reg[size + mod_offset - 1:mod_offset] = value
self._intf.write(self._base_addr + addr + div_offset, data=array.array('B', reg.tobytes())) | Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit size/length of the value to be written to the register.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
nothing | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L81-L110 |
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.get_value | def get_value(self, addr, size, offset, **kwargs):
'''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
reg : int
Register value.
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
return reg[size + mod_offset - 1:mod_offset].tovalue() | python | def get_value(self, addr, size, offset, **kwargs):
'''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
reg : int
Register value.
'''
div_offset, mod_offset = divmod(offset, 8)
div_size, mod_size = divmod(size + mod_offset, 8)
if mod_size:
div_size += 1
ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
reg = BitLogic()
reg.frombytes(tobytes(ret))
return reg[size + mod_offset - 1:mod_offset].tovalue() | Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
reg : int
Register value. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L112-L136 |
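Both register accessors reduce to the same arithmetic: divmod the bit offset into a byte offset plus a bit remainder, work out how many bytes the field spans, then do a masked read or read-modify-write on that window. A standalone sketch of that math on a plain bytearray; the little-endian byte order and the helper names are assumptions of the sketch, not the driver's API:

```python
def field_span(size, offset):
    """Return (byte_offset, bit_offset, n_bytes) for a bit field."""
    div_offset, mod_offset = divmod(offset, 8)
    div_size, mod_size = divmod(size + mod_offset, 8)
    if mod_size:
        div_size += 1
    return div_offset, mod_offset, div_size

def get_field(mem, size, offset):
    byte_off, bit_off, n_bytes = field_span(size, offset)
    window = int.from_bytes(mem[byte_off:byte_off + n_bytes], 'little')
    return (window >> bit_off) & ((1 << size) - 1)

def set_field(mem, size, offset, value):
    byte_off, bit_off, n_bytes = field_span(size, offset)
    window = int.from_bytes(mem[byte_off:byte_off + n_bytes], 'little')
    mask = ((1 << size) - 1) << bit_off
    window = (window & ~mask) | ((value << bit_off) & mask)
    mem[byte_off:byte_off + n_bytes] = window.to_bytes(n_bytes, 'little')

mem = bytearray(4)
set_field(mem, size=5, offset=11, value=0b10110)
print(get_field(mem, size=5, offset=11))  # 22
```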
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.set_bytes | def set_bytes(self, data, addr, **kwargs):
'''Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
'''
self._intf.write(self._conf['base_addr'] + addr, data) | python | def set_bytes(self, data, addr, **kwargs):
'''Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
'''
self._intf.write(self._conf['base_addr'] + addr, data) | Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L138-L152 |
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.get_bytes | def get_bytes(self, addr, size, **kwargs):
'''Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array.
'''
return self._intf.read(self._conf['base_addr'] + addr, size) | python | def get_bytes(self, addr, size, **kwargs):
'''Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array.
'''
return self._intf.read(self._conf['base_addr'] + addr, size) | Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L154-L169 |
SiLab-Bonn/basil | basil/RL/StdRegister.py | StdRegister.write | def write(self, size=None):
"""
to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
fields: ......
"""
if size is None:
self._drv.set_data(self.tobytes())
else:
self._drv.set_data(self.tobytes()[:size])
if "auto_start" in self._conf:
if self._conf["auto_start"]:
self._drv.start() | python | def write(self, size=None):
"""
to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
fields: ......
"""
if size is None:
self._drv.set_data(self.tobytes())
else:
self._drv.set_data(self.tobytes()[:size])
if "auto_start" in self._conf:
if self._conf["auto_start"]:
self._drv.start() | to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
fields: ...... | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/RL/StdRegister.py#L102-L120 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.from_value | def from_value(cls, value, size=None, fmt='Q', **kwargs):
'''
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
'''
bl = cls(**kwargs) # size is 0 by default
bl.fromvalue(value=value, size=size, fmt=fmt)
return bl | python | def from_value(cls, value, size=None, fmt='Q', **kwargs):
'''
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
'''
bl = cls(**kwargs) # size is 0 by default
bl.fromvalue(value=value, size=size, fmt=fmt)
return bl | Factory method
For format characters see: https://docs.python.org/2/library/struct.html | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L32-L40 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.fromvalue | def fromvalue(self, value, size=None, fmt='Q'):
'''
        Append from an int/long number.
'''
if size and value.bit_length() > size:
raise TypeError('Value is too big for given size')
self.frombytes(struct.pack(fmt, value))
if size:
if not isinstance(size, integer_types) or not size > 0:
raise TypeError('Size must be greater than zero')
if size > self.length():
bitarray.extend(self, (size - self.length()) * [0])
else:
bitarray.__delitem__(self, slice(size, self.length())) | python | def fromvalue(self, value, size=None, fmt='Q'):
'''
        Append from an int/long number.
'''
if size and value.bit_length() > size:
raise TypeError('Value is too big for given size')
self.frombytes(struct.pack(fmt, value))
if size:
if not isinstance(size, integer_types) or not size > 0:
raise TypeError('Size must be greater than zero')
if size > self.length():
bitarray.extend(self, (size - self.length()) * [0])
else:
                bitarray.__delitem__(self, slice(size, self.length())) | Append from an int/long number. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L42-L55 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.tovalue | def tovalue(self, fmt='Q'):
'''
        Convert bitstring to an int/long number.
'''
format_size = struct.calcsize(fmt)
if self.length() > format_size * 8:
raise TypeError('Cannot convert to number')
ba = self.copy()
ba.extend((format_size * 8 - self.length()) * [0])
return struct.unpack_from(fmt, ba.tobytes())[0] | python | def tovalue(self, fmt='Q'):
'''
        Convert bitstring to an int/long number.
'''
format_size = struct.calcsize(fmt)
if self.length() > format_size * 8:
raise TypeError('Cannot convert to number')
ba = self.copy()
ba.extend((format_size * 8 - self.length()) * [0])
        return struct.unpack_from(fmt, ba.tobytes())[0] | Convert bitstring to an int/long number. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L57-L66 |
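Both conversions rest on struct packing: a number becomes its fixed-width little-endian byte string ('Q' is unsigned 64-bit) and comes back via unpack after zero-padding to the format width. A plain-struct sketch of that round trip, without the bitarray dependency; the helper names are invented for illustration:

```python
import struct

def value_to_bits(value, size, fmt='Q'):
    """Return `size` bits (LSB first) of `value`, mirroring the fromvalue() idea."""
    if value.bit_length() > size:
        raise TypeError('Value is too big for given size')
    raw = struct.pack(fmt, value)
    bits = [(byte >> i) & 1 for byte in raw for i in range(8)]
    return bits[:size]

def bits_to_value(bits, fmt='Q'):
    """Pad the bit list to the format width and unpack it, mirroring the tovalue() idea."""
    width = struct.calcsize(fmt) * 8
    if len(bits) > width:
        raise TypeError('Cannot convert to number')
    padded = bits + [0] * (width - len(bits))
    raw = bytes(sum(b << i for i, b in enumerate(padded[j:j + 8])) for j in range(0, width, 8))
    return struct.unpack_from(fmt, raw)[0]

bits = value_to_bits(0b1011, size=6)
print(bits)                 # [1, 1, 0, 1, 0, 0]
print(bits_to_value(bits))  # 11
```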
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic._swap_slice_indices | def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
else:
slc_stop = start + 1
if not stop and stop != 0:
slc_start = 0
elif stop < 0:
slc_start = self.length() + stop
else:
slc_start = stop
return slice(slc_start, slc_stop, slc_step) | python | def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
else:
slc_stop = start + 1
if not stop and stop != 0:
slc_start = 0
elif stop < 0:
slc_start = self.length() + stop
else:
slc_start = stop
return slice(slc_start, slc_stop, slc_step) | Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L107-L136 |
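A standalone sketch of the index swap: a Verilog-style part select `[msb:lsb]` is inclusive on both ends, so it maps to a Python slice with `lsb` as start and `msb + 1` as stop. The `verilog_slice` helper below is illustrative only:

```python
def verilog_slice(msb, lsb, length):
    """Convert an inclusive [msb:lsb] part select into a Python slice."""
    if msb < 0:
        msb += length
    if lsb < 0:
        lsb += length
    return slice(lsb, msb + 1)

bits = [0, 1, 0, 1, 1, 0, 1, 1]                # index 0 plays the role of bit 0
print(bits[verilog_slice(7, 4, len(bits))])    # bits 4..7 -> [1, 0, 1, 1]
print(bits[verilog_slice(3, 0, len(bits))])    # bits 0..3 -> [0, 1, 0, 1]
```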
SiLab-Bonn/basil | examples/lx9/host/lx9.py | Pixel._run_seq | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
for _ in range(1):
self['SEQ'].start() # start
while not self['SEQ'].get_done():
#time.sleep(0.1)
print("Wait for done...") | python | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
for _ in range(1):
self['SEQ'].start() # start
while not self['SEQ'].get_done():
#time.sleep(0.1)
print("Wait for done...") | Send the contents of self['SEQ'] to the chip and wait until it finishes. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/lx9/host/lx9.py#L73-L89 |
SiLab-Bonn/basil | examples/lx9/host/lx9.py | Pixel._clear_strobes | def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False) | python | def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False) | Resets the "enable" and "load" output streams to all 0. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/lx9/host/lx9.py#L91-L101 |
SiLab-Bonn/basil | basil/HL/spi.py | spi.set_data | def set_data(self, data, addr=0):
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data) | python | def set_data(self, data, addr=0):
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data) | Sets data for outgoing stream | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L107-L113 |
SiLab-Bonn/basil | basil/HL/spi.py | spi.get_data | def get_data(self, size=None, addr=None):
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, self._mem_bytes)
else:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, size) | python | def get_data(self, size=None, addr=None):
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, self._mem_bytes)
else:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, size) | Gets data for incoming stream | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L116-L130 |
SiLab-Bonn/basil | basil/TL/Serial.py | Serial.init | def init(self):
'''
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
'''
super(Serial, self).init()
self.read_termination = self._init.get('read_termination', None)
self.write_termination = self._init.get('write_termination', self.read_termination)
try:
self.read_termination = bytes(self.read_termination, 'utf-8')
self.write_termination = bytes(self.write_termination, 'utf-8')
except TypeError as e:
logger.debug(e)
self.timeout = self._init.get('timeout', None) # timeout of 0 returns immediately
self._port = serial.Serial(**{key: value for key, value in self._init.items() if key not in ("read_termination", "write_termination")}) | python | def init(self):
'''
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
'''
super(Serial, self).init()
self.read_termination = self._init.get('read_termination', None)
self.write_termination = self._init.get('write_termination', self.read_termination)
try:
self.read_termination = bytes(self.read_termination, 'utf-8')
self.write_termination = bytes(self.write_termination, 'utf-8')
except TypeError as e:
logger.debug(e)
self.timeout = self._init.get('timeout', None) # timeout of 0 returns immediately
self._port = serial.Serial(**{key: value for key, value in self._init.items() if key not in ("read_termination", "write_termination")}) | Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Serial.py#L24-L40 |
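The init splits its configuration in two: the termination strings are kept (and encoded to bytes) by the driver itself, everything else is forwarded to `serial.Serial`. A sketch of that split on an invented init dict, with the actual `serial.Serial(...)` call left commented out so the snippet runs without pyserial:

```python
init = {
    "port": "/dev/ttyUSB0",
    "baudrate": 19200,
    "timeout": 2.0,
    "read_termination": "\r\n",
}

read_termination = init.get("read_termination", None)
write_termination = init.get("write_termination", read_termination)
if read_termination is not None:
    read_termination = bytes(read_termination, "utf-8")
if write_termination is not None:
    write_termination = bytes(write_termination, "utf-8")

# everything except the termination keys goes straight to the serial backend
serial_kwargs = {k: v for k, v in init.items()
                 if k not in ("read_termination", "write_termination")}
# port = serial.Serial(**serial_kwargs)   # would open the device if pyserial is installed
print(serial_kwargs, read_termination, write_termination)
```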
SiLab-Bonn/basil | basil/HL/GPAC.py | AdcMax11644._setup_adc | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', flags | self.MAX11644_SETUP))) | python | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', flags | self.MAX11644_SETUP))) | Initialize ADC | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L174-L177 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.read_eeprom_calibration | def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['min'] = values[2]
self._ch_cal[channel]['max'] = values[3]
self._ch_cal[channel]['ADCI']['gain'] = -values[4] # fix gain sign
self._ch_cal[channel]['ADCI']['offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DAC']['gain'] = values[8]
self._ch_cal[channel]['DAC']['offset'] = values[9]
self._ch_cal[channel]['limit'] = values[10]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | python | def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['min'] = values[2]
self._ch_cal[channel]['max'] = values[3]
self._ch_cal[channel]['ADCI']['gain'] = -values[4] # fix gain sign
self._ch_cal[channel]['ADCI']['offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DAC']['gain'] = values[8]
self._ch_cal[channel]['DAC']['offset'] = values[9]
self._ch_cal[channel]['limit'] = values[10]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header) | Reading EEPROM calibration for sources and regulators | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L716-L737 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_voltage | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
dac_gain = self._ch_cal[channel]['ADCV']['gain']
voltage = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'V':
return voltage / 1000
elif unit == 'mV':
return voltage
else:
raise TypeError("Invalid unit type.") | python | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
dac_gain = self._ch_cal[channel]['ADCV']['gain']
voltage = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'V':
return voltage / 1000
elif unit == 'mV':
return voltage
else:
raise TypeError("Invalid unit type.") | Reading voltage | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L764-L783 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_current | def get_current(self, channel, unit='A'):
'''Reading current
'''
values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
dac_offset = self._ch_cal[channel]['ADCI']['offset']
dac_gain = self._ch_cal[channel]['ADCI']['gain']
if 'PWR' in channel:
current = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000
elif unit == 'mA':
return current
elif unit == 'uA':
return current * 1000
else:
raise TypeError("Invalid unit type.")
else:
voltage = values[self._ch_map[channel]['ADCV']['adc_ch']]
current = (((raw - voltage) - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000000
elif unit == 'mA':
return current / 1000
elif unit == 'uA':
return current
else:
raise TypeError("Invalid unit type.") | python | def get_current(self, channel, unit='A'):
'''Reading current
'''
values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
dac_offset = self._ch_cal[channel]['ADCI']['offset']
dac_gain = self._ch_cal[channel]['ADCI']['gain']
if 'PWR' in channel:
current = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000
elif unit == 'mA':
return current
elif unit == 'uA':
return current * 1000
else:
raise TypeError("Invalid unit type.")
else:
voltage = values[self._ch_map[channel]['ADCV']['adc_ch']]
current = (((raw - voltage) - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000000
elif unit == 'mA':
return current / 1000
elif unit == 'uA':
return current
else:
raise TypeError("Invalid unit type.") | Reading current | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L785-L819 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.set_enable | def set_enable(self, channel, value):
'''Enable/Disable output of power channel
'''
try:
bit = self._ch_map[channel]['GPIOEN']['bit']
except KeyError:
raise ValueError('set_enable() not supported for channel %s' % channel)
self._set_power_gpio_value(bit=bit, value=value) | python | def set_enable(self, channel, value):
'''Enable/Disable output of power channel
'''
try:
bit = self._ch_map[channel]['GPIOEN']['bit']
except KeyError:
raise ValueError('set_enable() not supported for channel %s' % channel)
self._set_power_gpio_value(bit=bit, value=value) | Enable/Disable output of power channel | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L821-L828 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_over_current | def get_over_current(self, channel):
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
raise ValueError('get_over_current() not supported for channel %s' % channel)
return not self._get_power_gpio_value(bit) | python | def get_over_current(self, channel):
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
raise ValueError('get_over_current() not supported for channel %s' % channel)
return not self._get_power_gpio_value(bit) | Reading over current status of power channel | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L830-L837 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.set_current_limit | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
# TODO: add units / calibration
if unit == 'raw':
value = value
elif unit == 'A':
value = int(value * 1000 * self.CURRENT_LIMIT_GAIN)
elif unit == 'mA':
value = int(value * self.CURRENT_LIMIT_GAIN)
elif unit == 'uA':
value = int(value / 1000 * self.CURRENT_LIMIT_GAIN)
else:
raise TypeError("Invalid unit type.")
I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=value) | python | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
# TODO: add units / calibration
if unit == 'raw':
value = value
elif unit == 'A':
value = int(value * 1000 * self.CURRENT_LIMIT_GAIN)
elif unit == 'mA':
value = int(value * self.CURRENT_LIMIT_GAIN)
elif unit == 'uA':
value = int(value / 1000 * self.CURRENT_LIMIT_GAIN)
else:
raise TypeError("Invalid unit type.")
I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=value) | Setting current limit
Note: same limit for all channels. | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L839-L856 |
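The limit value is normalised to DAC counts by converting to milliamps and multiplying by the board's current-limit gain. A sketch of just that unit handling; the gain value below is invented, not the GPAC's real constant:

```python
CURRENT_LIMIT_GAIN = 20  # hypothetical: DAC counts per mA

def current_limit_to_raw(value, unit='A'):
    if unit == 'raw':
        return value
    elif unit == 'A':
        return int(value * 1000 * CURRENT_LIMIT_GAIN)
    elif unit == 'mA':
        return int(value * CURRENT_LIMIT_GAIN)
    elif unit == 'uA':
        return int(value / 1000 * CURRENT_LIMIT_GAIN)
    raise TypeError("Invalid unit type.")

print(current_limit_to_raw(0.5, 'A'))      # 10000
print(current_limit_to_raw(500, 'mA'))     # 10000
print(current_limit_to_raw(500000, 'uA'))  # 10000
```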