def pairs(tensor1, tensor2, name="pairs"):
"""Pairwise combination of elements from the two tensors.
Example:
```python
t1 = [[0],[1]]
t2 = [2,3,4]
t12 = [[0,2],[1,2],[0,3],[1,3],[0,4],[1,4]]
p12 = tx.pairs(t1,t2)
tf.reduce_all(tf.equal(p12,t12))
```
Args:
tensor1 (`Tensor`): a tensor, python list, or numpy array
tensor2 (`Tensor`): a tensor, python list, or numpy array
name (`str`): name for the pairs op
Returns:
tensor (`Tensor`): a tensor with the pairwise combination of input tensors
"""
tensor1 = tf.convert_to_tensor(tensor1)
tensor2 = tf.convert_to_tensor(tensor2)
with tf.name_scope(name):
x, y = tf.meshgrid(tensor1, tensor2)
result = tf.stack([x, y], axis=-1)
result = tf.reshape(result, [-1, 2])
return result
def sparse_put(sp_tensor, sp_updates, name="sparse_put"):
""" sparse_put
Changes a given tf.SparseTensor according to the updates specified in a tf.SparseTensor.
Creates a new tensor where the values of the updates override the
values in the original tensor. The input tensors must have the same
`dense_shape`.
Args:
sp_tensor (`SparseTensor`): a sparse tensor whose entries at some indices we wish to set to given values
sp_updates (`SparseTensor`): a ``SparseTensor`` with the indices to be changed and the respective values
name (`str`): sparse_put op name
Returns:
sparse_tensor (`SparseTensor`): a sparse tensor with the updated values.
"""
with tf.name_scope(name=name):
if sp_updates.dtype != sp_tensor.dtype:
sp_updates = tf.cast(sp_updates, sp_tensor.dtype)
# 1 concat indices and establish final tensor shape
update_shape = tf.shape(sp_updates.values)
zero_updates = tf.SparseTensor(sp_updates.indices,
tf.zeros(update_shape, dtype=sp_tensor.dtype),  # match the input dtype so tf.sparse.add works for non-float32 tensors
sp_updates.dense_shape)
proto_result = tf.sparse.add(sp_tensor, zero_updates)
# shape of resulting values tensor
proto_shape = tf.shape(proto_result.values)
# 2 get mask for input tensor
proto_ones = tf.SparseTensor(proto_result.indices,
tf.ones(proto_shape, tf.int32),
proto_result.dense_shape)
# mask_ones = tf.math.scalar_mul(-1, tf.ones(update_shape))
sp_mask = tf.SparseTensor(sp_updates.indices,
tf.ones_like(sp_updates.values, dtype=tf.int32) * -1,
sp_updates.dense_shape)
to_retain = tf.sparse.add(proto_ones, sp_mask)
to_retain = tf.not_equal(to_retain.values, 0)
# get tensor with masked values
tensor_masked = tf.sparse.retain(proto_result, to_retain)
# add values to entries previously set to 0
new_tensor = tf.sparse.add(tensor_masked, sp_updates)
return new_tensor
def put(tensor, sp_updates, name="put"):
""" put
Changes a given dense ``Tensor`` according to the updates specified in a ``SparseTensor``.
Creates a new ``Tensor`` where the values of the updates override the
values in the original tensor. The tensor `shape` must be the same as the updates `dense_shape`.
Args:
tensor (`Tensor`): tensor to be updated
sp_updates (`SparseTensor`): sparse tensor with the indices to be changed and the respective values.
name (`str`): put op name
Returns:
tensor (`Tensor`): a tensor with the updated values.
"""
tensor = as_tensor(tensor)
with tf.name_scope(name=name):
if sp_updates.dtype != tensor.dtype:
sp_updates = tf.cast(sp_updates, tensor.dtype)
markers = tf.ones(shape=tf.shape(sp_updates.values))
sparse_marker_tensor = tf.SparseTensor(indices=sp_updates.indices,
values=markers,
dense_shape=sp_updates.dense_shape)
dense_update_marker = tf.sparse.to_dense(sparse_marker_tensor)
dense_updates = tf.sparse.to_dense(sp_updates)
new_tensor = tf.where(tf.not_equal(dense_update_marker, 0),
dense_updates,
tensor)
return new_tensor | def put(tensor, sp_updates, name="put"):
""" put
Changes a given dense ``Tensor`` according to the updates specified in a ``SparseTensor``.
Creates a new ``Tensor`` where the values of the updates override the
values in the original tensor. The tensor `shape` must be the same as the updates `dense_shape`.
Args:
tensor (`Tensor`): tensor to be updated
sp_updates (`SparseTensor`): sparse tensor with the indices to be changed and the respective values.
name (`str`): put op name
Returns:
tensor (`Tensor`): a tensor with the updated values.
"""
tensor = as_tensor(tensor)
with tf.name_scope(name=name):
if sp_updates.dtype != tensor.dtype:
sp_updates = tf.cast(sp_updates, tensor.dtype)
markers = tf.ones(shape=tf.shape(sp_updates.values))
sparse_marker_tensor = tf.SparseTensor(indices=sp_updates.indices,
values=markers,
dense_shape=sp_updates.dense_shape)
dense_update_marker = tf.sparse.to_dense(sparse_marker_tensor)
dense_updates = tf.sparse.to_dense(sp_updates)
new_tensor = tf.where(tf.not_equal(dense_update_marker, 0),
dense_updates,
tensor)
return new_tensor |
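A small sketch for `put` on a dense tensor; `as_tensor` is assumed to be the module helper used inside the function.

```python
import tensorflow as tf

t = tf.zeros([2, 2])
updates = tf.SparseTensor(indices=[[0, 1]], values=[3.0], dense_shape=[2, 2])
print(put(t, updates))  # [[0. 3.]
                        #  [0. 0.]]
```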
def filter_nd(condition, params, name="filter_nd"):
""" filter_nd
Filters a given tensor based on a condition tensor;
`condition` and `params` must have the same shape.
Args:
condition (`Tensor`): a `bool` tensor used to filter params
params (`Tensor`): the tensor to be filtered
name (`str`): name for filter_nd op
Returns:
sp_tensor (`SparseTensor`): a sparse tensor with the values in params filtered according to condition
"""
with tf.name_scope(name=name):
indices = tf.cast(tf.where(condition), dtype=tf.int64)
values = tf.gather_nd(params, indices)
dense_shape = tf.cast(tf.shape(params), tf.int64)
sp_result = tf.SparseTensor(indices, values, dense_shape)
return sp_result
def sparse_overlap(sp_tensor1, sp_tensor2, name="sparse_overlap"):
""" Returns a `SparseTensor` where the indices of the two tensors overlap returning a `SparseTensor`
with the values of the first one
Args:
sp_tensor1 (`SparseTensor`): a `SparseTensor`
sp_tensor2 (`SparseTensor`): another `SparseTensor`
name (`str`): name for this op
Returns:
sp1 (`SparseTensor`): sparse tensor with the overlapping indices and values of the first tensor
"""
with tf.name_scope(name):
ones1 = sparse_ones(sp_tensor1.indices, sp_tensor1.dense_shape)
ones2 = sparse_ones(sp_tensor2.indices, sp_tensor2.dense_shape)
index_union = tf.sparse.add(ones1, ones2)
index_filter = tf.math.equal(index_union.values, 2.)
zeros1 = sparse_zeros(index_union.indices, index_union.dense_shape, sp_tensor1.values.dtype)
expand1 = tf.sparse.add(zeros1, sp_tensor1)
filtered = tf.sparse.retain(expand1, index_filter)
return filtered
def zeros_init():
""" Zeroes Initializer
Initializer that generates tensors initialized to 0.
Returns:
initializer (Callable): an initializer that returns a tensor filled with 0 when called on a given shape.
"""
return tf.zeros_initializer()
def ones_init():
""" Ones Initializer
Initializer that generates tensors initialized to 1.
Returns:
initializer (Callable): an initializer that returns a tensor filled with 1 when called on a given shape.
"""
return tf.ones_initializer()
def constant_init(value=0):
""" Constant Initializer
The resulting tensor is populated with values of type dtype, as specified by the `value` argument,
following the desired shape.
The argument value can be a constant value, or a list of values of type dtype. If value is a list, then the length
of the list must be less than or equal to the number of elements implied by the desired shape of the tensor.
In the case where the total number of elements in value is less than the number of elements required by the tensor
shape, the last element in value will be used to fill the remaining entries. If the total number of elements in
value is greater than the number of elements required by the tensor shape, the initializer will raise a ValueError.
Args:
value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of
the initialized variable will be set to the corresponding value in the value argument.
Returns:
initializer (Callable): an initializer that returns a tensor from the given specification and a given shape
"""
return tf.constant_initializer(value)
def uniform_init(minval: float = -0.05, maxval: float = 0.05, seed=None):
""" Random Uniform Initializer
Initializer that generates tensors with a uniform distribution.
Args:
minval: Lower bound of the range of random values to generate (defaults to -0.05).
maxval: Upper bound of the range of random values to generate (defaults to 0.05).
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): an initializer that returns a tensor from the given specification and a given shape
"""
return tf.random_uniform_initializer(minval=minval, maxval=maxval, seed=seed)
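These `*_init` helpers simply wrap the standard TensorFlow initializer objects; calling the returned object with a shape produces the initial value. A quick sketch:

```python
import tensorflow as tf

init = uniform_init(minval=-0.1, maxval=0.1, seed=42)
w = init(shape=[3, 4], dtype=tf.float32)  # values drawn from U(-0.1, 0.1)
print(w.shape)  # (3, 4)
```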
def normal_init(mean: float = 0.0, stddev=0.05, seed=None):
""" Random Normal Initializer
Initializer that generates tensors with a normal distribution.
Args:
mean: Mean of the random values to generate.
stddev: Standard deviation of the random values to generate.
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): an initializer that returns a tensor from the given specification and a given shape
"""
return tf.random_normal_initializer(mean=mean, stddev=stddev, seed=seed)
def glorot_uniform_init(seed: Optional = None) -> Callable:
""" Glorot Uniform Initializer
This initialisation keeps the scale of the gradients roughly the same in all layers to
mitigate `vanishing` and `exploding gradients`; see [1].
References:
[1] (Glorot and Bengio 2010), "Understanding the difficulty of training deep
feedforward neural networks".
Args:
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): callable that creates an initial value from a given shape
"""
return tf.initializers.glorot_uniform(seed)
def glorot_normal_init(seed: Optional = None) -> Callable:
""" Glorot Normal Initializer
This initialisation keeps the scale of the gradients roughly the same in all layers to
mitigate `vanishing` and `exploding gradients`; see [1].
Draws samples from a truncated normal distribution.
References:
[1] (Glorot and Bengio 2010), "Understanding the difficulty of training deep
feedforward neural networks".
Args:
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): callable that creates an initial value from a given shape
"""
return tf.initializers.glorot_normal(seed)
def orthogonal_init(gain: float = 1.0, seed=None) -> Callable:
""" Orthogonal initializer
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
!!! cite "References"
1. [Exact solutions to the nonlinear dynamics of learning in deep linear neural networks](https://openreview.net/forum?id=_wzZwKpTDF_9C)
Args:
gain (float): multiplicative factor to apply to the orthogonal matrix
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): callable that creates an orthogonal matrix from a given shape
"""
return tf.initializers.orthogonal(gain=gain, seed=seed)
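As a sketch of the orthogonality property, a square matrix produced by this initializer times its transpose is approximately the identity scaled by `gain**2`:

```python
import tensorflow as tf

init = orthogonal_init(gain=1.0, seed=42)
w = init(shape=[4, 4], dtype=tf.float32)
tf.debugging.assert_near(tf.matmul(w, w, transpose_b=True), tf.eye(4), atol=1e-5)
```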
def identity_init(gain: float = 1.0):
""" Identity Initializer
creates an identity matrix for a 2D shape
Args:
gain (float): multiplicative factor to be applied to the identity matrix
Returns:
initializer (Callable): callable that creates an identity matrix from a given 2D shape
"""
return tf.initializers.identity(gain=gain)
def he_uniform_init(seed=None):
""" He Uniform Initializer
also known as `MSRA` initialization
It draws samples from a uniform distribution within $[-l, l]$, where $l = \\sqrt{\\frac{6}{fan_{in}}}$ and
$fan_{in}$ is the number of input units in the weight tensor.
!!! Cite "References"
1. [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
Args:
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): callable that returns a tensor value from a given shape
"""
return tf.initializers.he_uniform(seed=seed)
def he_normal_init(seed=None):
""" He Normal Initializer
also known as `MSRA` initialization
It draws samples from a truncated normal distribution centered on $0$ with
$stddev = \\sqrt{\\frac{2}{fan_{in}}}$, where $fan_{in}$ is the number of input units in the weight tensor.
!!! Cite "References"
1. [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
Args:
seed (int32/int64): seed for random number generator
Returns:
initializer (Callable): callable that returns a tensor value from a given shape
"""
return tf.initializers.he_normal(seed=seed)
def trigger(self, event: Event):
""" Triggers an Event in the scheduler
Args:
event (Event): the event being triggered; the scheduler dispatches it to the matching Callbacks
"""
matches = self.matches(event)
for callback in matches:
self.trigger(OnCallback(callback, at=AT.START))
callback(event, self.model, self.props)
self.trigger(OnCallback(callback, at=AT.END))
def cosine_distance(tensor1, tensor2, dtype=tf.float32):
""" cosine_distance
Computes the pairwise cosine distance between two non-zero tensors on their last dimension.
The cosine distance is defined as 1 - cosine similarity, with the cosine similarity defined as:
$$
similarity =\\cos (\\theta)=\\frac{\\mathbf{A} \\cdot \\mathbf{B}}{\\|\\mathbf{A}\\|\\|\\mathbf{B}\\|}=\\frac{
\\sum_{i=1}^{n} A_{i} B_{i}}{\\sqrt{\\sum_{i=1}^{n} A_{i}^{2}} \\sqrt{\\sum_{i=1}^{n} B_{i}^{2}}}
$$
Args:
tensor1 (`Tensor`): first tensor
tensor2 (`Tensor`): second tensor
dtype (`DType`): assumed type of both tensors
Returns:
distance (`Tensor`): the pairwise cosine distance between two tensors
"""
tensor1 = tf.convert_to_tensor(tensor1, dtype)
tensor2 = tf.convert_to_tensor(tensor2, dtype)
dot_prod = tf.reduce_sum(tf.multiply(tensor1, tensor2), -1)
norm1 = tf.norm(tensor1, axis=-1)
norm2 = tf.norm(tensor2, axis=-1)
norm12 = norm1 * norm2
cos12 = dot_prod / norm12
sim = tf.where(tf.math.is_nan(cos12), tf.zeros_like(cos12), cos12)
# clip to keep acos well-defined if this is later converted to angular distance (acos(1.000001) is nan)
sim = tf.clip_by_value(sim, -1., 1.)
return 1 - sim
def euclidean_distance(tensor1, tensor2):
""" Computes the euclidean distance between two tensors.
The euclidean distance or $L^2$ distance between points $p$ and $q$ is the length of the line segment
connecting them.
$$
distance(q,p) =\\sqrt{\\sum_{i=1}^{n}\\left(q_{i}-p_{i}\\right)^{2}}
$$
Args:
tensor1: a ``Tensor``
tensor2: a ``Tensor``
Returns:
``Tensor``: a ``Tensor`` with the euclidean distances between the two tensors
"""
tensor1 = tf.convert_to_tensor(tensor1)
tensor2 = tf.convert_to_tensor(tensor2)
distance = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=-1))
return distance
def sparse_euclidean_distance(sp_tensor, tensor2):
""" Computes the euclidean distance between two tensors.
Args:
sp_tensor (`Union[Tensor,SparseTensor]`): a tensor or sparse tensor
tensor2 (`Tensor`): a dense tensor
Returns:
distance (`Tensor`): euclidean distances between the two tensors
"""
tensor1 = tf.SparseTensor.from_value(sp_tensor)
if tensor1.values.dtype != tf.float32:
# SparseTensor.values is read-only, so rebuild the tensor with the cast values
tensor1 = tf.SparseTensor(tensor1.indices, tf.cast(tensor1.values, tf.float32), tensor1.dense_shape)
tensor2 = tf.convert_to_tensor(tensor2)
# densify before subtracting, since a SparseTensor does not support subtraction with a dense Tensor
distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sparse.to_dense(tensor1) - tensor2), axis=-1))
return distance
def pairwise_euclidean_distance(tensor1, tensor2, keepdims=False):
""" Computes the euclidean distance between two tensors.
Args:
tensor1 (`Tensor`): a dense tensor
tensor2 (`Tensor`): a dense tensor
keepdims (`bool`): if True, retains the reduced last dimension with length 1
Returns:
distance (`Tensor`): euclidean distances between the two tensors
"""
tensor1 = tf.convert_to_tensor(tensor1)
tensor2 = tf.convert_to_tensor(tensor2)
tensor1 = tf.expand_dims(tensor1, 1)
distance = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=-1, keepdims=keepdims))
return distance
def torus_l1_distance(point, shape):
""" Computes the l1 distance between a given point or batch of points and all other points in a torus
Args:
point (`Tensor`): a rank 0 or rank 1 tensor with the coordinates for a point or a rank 2 tensor with a batch of points.
shape (`List`): a list with the shape for the torus - either 1D or 2D
Returns:
distances (`Tensor`): a rank 1 or 2 tensor with the distances between each point in the 1D torus and each unique
coordinate in the shape
Examples:
* distance for a single point
`torus_l1_distance(1,[4])`
or
`torus_1d_l1_distance([1],[4])`
```python
[ 1., 0., 1., 2.]
```
* distance for multiple points `torus_l1_distance([[2],[3]],[4])`
```python
[[ 2., 1., 0., 1.],
[ 1., 2., 1., 0.]]
```
* distance between a point and other coordinates in a 2D torus
```python
r = torus_l1_distance([[1,1],[1,2]],[3,3])
np.reshape(r,[-1,3,3])
[[[ 2., 1., 2.],
[ 1., 0., 1.],
[ 2., 1., 2.]],
[[ 2., 2., 1.],
[ 1., 1., 0.],
[ 2., 2., 1.]]]
```
"""
point = as_tensor(point, tf.float32)
if len(shape) == 1:
max_x = shape[0]
coor_x = tf.range(0, max_x, 1, dtype=tf.float32)
dx = tf.abs(point - coor_x)
distance = tf.minimum(dx, tf.math.mod(-dx, max_x))
elif len(shape) == 2:
max_x = shape[0]
max_y = shape[1]
xys = grid_2d(shape)
xys = tf.cast(xys, tf.float32)
xs, ys = tf.unstack(xys, num=2, axis=-1)
px, py = tf.unstack(point, num=2, axis=-1)
px = tf.expand_dims(px, 1)
py = tf.expand_dims(py, 1)
dx = tf.abs(px - xs)
dy = tf.abs(py - ys)
dx = tf.minimum(dx, tf.math.mod(-dx, max_x))
dy = tf.minimum(dy, tf.math.mod(-dy, max_y))
distance = dx + dy
else:
raise ValueError("Invalid shape parameter, shape must have len 1 or 2")
return distance
def batch_manhattan_distance(tensor1, tensor2, keepdims=False):
""" Compute the pairwise manhattan distance between a batch of tensors and a second tensor
If either tensor is a ``SparseTensor``, it is converted to a dense tensor first.
Args:
tensor1 (`Union[Tensor,SparseTensor]`): a batch of tensors or sparse tensor
tensor2 (`Union[Tensor,SparseTensor]`): another tensor or a sparse tensor
keepdims (`Bool`): if True keeps the dimensions of the original tensors
Returns:
distance (`Tensor`): the manhattan distance between the two tensors
"""
tensor1 = as_tensor(tensor1)
tensor2 = as_tensor(tensor2)
if isinstance(tensor1, tf.SparseTensor):
tensor1 = tf.sparse.to_dense(tensor1)
if isinstance(tensor2, tf.SparseTensor):
tensor2 = tf.sparse.to_dense(tensor2)
tensor1 = tf.expand_dims(tensor1, 1)
abs_diff = tf.abs(tf.subtract(tensor1, tensor2))
return tf.reduce_sum(abs_diff, axis=-1, keepdims=keepdims)
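A usage sketch (again assuming the module's `as_tensor` helper), showing the pairwise result between a batch of two points and two reference points:

```python
import tensorflow as tf

x = tf.constant([[0., 0.], [1., 2.]])
y = tf.constant([[1., 1.], [2., 2.]])
print(batch_manhattan_distance(x, y))  # [[2. 4.]
                                       #  [1. 1.]]  d[i, j] = sum(|x[i] - y[j]|)
```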
def batch_sparse_cosine_distance(sp_tensor, tensor, dtype=tf.float32, keepdims=False):
""" Computes the cosine distance between two non-zero `SparseTensor` and `Tensor`
Warning:
1 - cosine similarity is not a proper distance metric; to repair the triangle inequality property while
maintaining the same ordering, it is necessary to convert it to angular distance
Args:
sp_tensor: a `SparseTensor`
tensor: a `Tensor`
dtype (`DType`): assumed type of both tensors
keepdims: keeps the original dimension of the input tensor
Returns:
a `Tensor` with the cosine distance between two tensors
"""
sp_tensor = as_tensor(sp_tensor, dtype)
tensor = tf.convert_to_tensor(tensor, dtype)
dot_prod = batch_sparse_dot(sp_tensor, tensor, keepdims=keepdims)
norm1 = sparse_l2_norm(sp_tensor, axis=-1, keepdims=True)
norm2 = tf.norm(tensor, axis=-1)
norm12 = norm1 * norm2
if keepdims:
norm12 = tf.expand_dims(norm12, -1)
cos12 = dot_prod / norm12
sim = tf.where(tf.math.is_nan(cos12), tf.zeros_like(cos12), cos12)
sim = tf.clip_by_value(sim, -1., 1.)
return 1 - sim
def sinkhorn(tensor1, tensor2, epsilon, n_iter, cost_fn=None):
""" Sinkhorn Distance
!!! info
Optimal Transport (OT) provides a framework from which one can define a more powerful geometry to compare
probability distributions. This power comes, however, with a heavy computational price. The cost of computing OT
distances scales at least in $O(d^3 log(d))$ when comparing two histograms of dimension $d$. The Sinkhorn
algorithm alleviates this problem by solving a regularized OT problem in linear time.
Given two measures with n points each, with locations x and y, it outputs an approximation of the Optimal
Transport (OT) cost with regularization parameter `epsilon`; `n_iter` is the maximum number of steps in the
Sinkhorn loop.
!!! cite "References"
1. [Concerning nonnegative matrices and doubly stochastic matrices](https://msp.org/pjm/1967/21-2/p14.xhtml)
2. [Sinkhorn Distances: Lightspeed Computation of Optimal Transport](https://papers.nips.cc/paper/4927-sinkhorn-distances-lightspeed-computation-of-optimal-transport.pdf)
Args:
tensor1 (`Tensor`): a tensor representing a distribution
tensor2 (`Tensor`): other tensor with another distribution
epsilon (float): regularization term >0
n_iter (`int`): number of sinkhorn iterations
cost_fn (`Callable`): function that returns the cost matrix between y_pred and y_true, defaults to $|x_i-y_j|^p$.
Returns:
cost (`Tensor`): sinkhorn cost of moving from the mass from the model distribution `y_pred` to the empirical
distribution `y_true`.
"""
def cost_matrix(x, y, p=2):
""" cost matrix of $|x_i-y_j|^p$.
"""
xc = tf.expand_dims(x, 1)
yr = tf.expand_dims(y, 0)
d = tf.math.pow(tf.abs(xc - yr), p)
return tf.reduce_sum(d, axis=-1)
# n x n Wasserstein cost function
if cost_fn is None:
cost_m = cost_matrix(tensor1, tensor2)
else:
cost_m = cost_fn(tensor1, tensor2)
# both marginals are fixed with equal weights
# mu = Variable(1. / n * tf.ones([n], dtype=tf.float32))
# nu = Variable(1. / n * tf.ones([n], dtype=tf.float32))
n = tf.shape(tensor1)[0]
init_v = tf.cast(n, tf.float32) * tf.ones([n], dtype=tf.float32)
mu = 1. / init_v
nu = 1. / init_v
# Parameters of the sinkhorn algorithm.
rho = 1 # (.5) **2 # unbalanced transport
tau = -.8 # nesterov-like acceleration
lam = rho / (rho + epsilon) # Update exponent
thresh = 0.1 # stopping criterion
# Elementary operations .....................................................................
def ave(u, u1):
# Barycenter subroutine, used by kinetic acceleration through extrapolation.
return tau * u + (1 - tau) * u1
def M(u, v):
# Modified cost for logarithmic updates $M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$
return (-cost_m + tf.expand_dims(u, 1) + tf.expand_dims(v, 0)) / epsilon
def lse(A):
# log-sum-exp
return tf.reduce_logsumexp(A, axis=1, keepdims=True)
# Actual Sinkhorn loop ......................................................................
init_u, init_v, init_err = 0. * mu, 0. * nu, 0.
actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
def body(i, u, v, _):
u0 = u # to check the error threshold
new_u = epsilon * (tf.math.log(mu) - tf.squeeze(lse(M(u, v)))) + u
new_v = epsilon * (tf.math.log(nu) - tf.squeeze(lse(tf.transpose(M(new_u, v))))) + v
# accelerated unbalanced iterations
# u = ave( u, lam * ( epsilon * ( torch.log(mu) - lse(M(u,v)).squeeze() ) + u ) )
# v = ave( v, lam * ( epsilon * ( torch.log(nu) - lse(M(u,v).t()).squeeze() ) + v ) )
error = tf.reduce_sum(tf.abs(new_u - u0))
return i + 1, new_u, new_v, error
def cond(i, u, v, err):
return tf.logical_and(tf.less(err, thresh), tf.less(i, n_iter))
i, u, v, err = tf.while_loop(cond=cond,
body=body,
loop_vars=(0, init_u, init_v, init_err))
pi = tf.exp(M(u, v)) # Transport plan p_i = diag(a)*K*diag(b)
return tf.reduce_sum(pi * cost_m)
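A rough usage sketch; the returned scalar is an approximation of the regularized OT cost and its exact value depends on the random point clouds and on `epsilon`:

```python
import tensorflow as tf

x = tf.random.normal([16, 2], seed=42)
y = tf.random.normal([16, 2], seed=7)
cost = sinkhorn(x, y, epsilon=0.1, n_iter=100)  # scalar regularized OT cost
```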
def gumbel_top(logits, num_samples, dtype=tf.int32, seed=None):
""" gumbel_top sampling
uses the Gumbel-Top trick to sample without replacement from a discrete probability distribution
parameterized by given (possibly unnormalized) log-probabilities `logits`.
Args:
logits (`Tensor`): log probabilities parameterizing the discrete distribution
num_samples (int): number of unique samples to draw from the distribution
dtype (`DType`): output dtype
seed (int): random seed
Returns:
samples (int): a tensor with the indices sampled from the target distribution with shape
`[shape(logits)[0],num_samples]`.
"""
with tf.name_scope("gumbel_top"):
shape = tf.shape(logits)
u = tf.random.uniform(shape, minval=0, maxval=1, seed=seed)
g = -tf.math.log(-tf.math.log(u))  # standard Gumbel(0, 1) noise
z = logits + g  # perturb the logits with Gumbel noise (Gumbel-top-k trick for sampling without replacement)
_, indices = tf.nn.top_k(z, k=num_samples)
if indices.dtype != dtype:
indices = tf.cast(indices, dtype)
return indices
def sample_sigmoid(logits, n, dtype=None, seed=None, name="sample_sigmoid"):
""" sample_sigmoid
Efficient sampling of a Bernoulli random variable from a sigmoid-defined distribution
!!! info
This can be applied to the output layer of a neural net if it represents a Bernoulli
distribution defined using a parameterized sigmoid-activated layer
Args:
logits (`Tensor`): logits
n (`int`): number of samples per row of logits
dtype (`tf.DType`): input Tensor dtype
seed (`int`): random number generator seed
name (`Optional[str]`): name for sample sigmoid op (optional)
Returns:
samples (`Tensor`): a tensor with samples
"""
with tf.name_scope(name):
logits = as_tensor(logits)
if dtype is not None:
logits = tf.cast(logits, dtype)
n = as_tensor(n)
shape = tf.shape(logits)
sample_shape = tf.concat([[n], shape], axis=-1)
uniform_sample = tf.random.uniform(sample_shape,
minval=0,
maxval=1,
dtype=logits.dtype, seed=seed)
z = logit(uniform_sample, dtype=logits.dtype)
return tf.cast(tf.greater(logits, z), tf.float32)
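A usage sketch, assuming the `logit` and `as_tensor` helpers used inside the function come from this module:

```python
import tensorflow as tf

p = tf.constant([[0.1, 0.9]])
logits = tf.math.log(p / (1. - p))  # inverse sigmoid of the target probabilities
samples = sample_sigmoid(logits, n=4, seed=42)
print(samples.shape)  # (4, 1, 2): 4 independent Bernoulli draws per logit
```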
def add_edge(self, node1, node2):
""" Adds a new edge to the graph
also removes the nodes from the graph's inputs or outputs if the new edge makes them internal to the graph.
Args:
node1 (`Node`): starting node
node2 (`Node`): ending node
"""
self.add_node(node1)
self.add_node(node2)
self.edges_out[node1].append(node2)
self.edges_in[node2].append(node1)
# update endpoints
if node1 in self.out_nodes:
del self.out_nodes[node1]
if node2 in self.in_nodes:
del self.in_nodes[node2]
def dependency_iter(self):
""" returns a dictionary with a map from nodes to dependency priorities
with lower values having higher priority. Keys are ordered by priority from
lower to higher and number of dependencies from lower to higher
Notes:
Traversing the graph by priority guarantees that when we visit a node,
all its dependencies have already been visited; additionally, ordering by
number of dependencies guarantees that we can maintain a minimal result
cache when traversing the graph.
Returns:
nodes (`dict`): dictionary from nodes to (priorities,number of dependencies)
"""
priority = dict()
visited = set()
nodes = list(self.in_nodes)
while nodes:
current = nodes.pop(0)
visited.add(current)
delayed = False
if not self.edges_in[current]:
priority[current] = (0, len(self.edges_out[current]))
else:
# delay node if not all dependencies are ready
if any([dep not in priority for dep in self.edges_in[current]]):
nodes.append(current)
delayed = True
else:
priorities = [priority[dep][0] for dep in self.edges_in[current]]
priority[current] = (max(priorities) + 1, len(self.edges_out[current]))
if not delayed:
next_nodes = self.edges_out[current]
for next_node in next_nodes:
if next_node not in visited:
nodes.insert(0, next_node)
sorted_priority = dict(sorted(priority.items(), key=lambda kv: kv[1], reverse=False))
return sorted_priority
def build(inputs, outputs, add_missing_inputs=False):
""" build_graph
!!! note
use `add_missing_inputs` if you have graph inputs but might have other dependencies that might not have
been created explicitly. Example: in an RNN layer, if a previous state is not passed explicitly, a default
one is created by the layer and stored in input layers. You might be aware of this input node to a graph but
not want to pass it explicitly to inputs.
Args:
inputs: input terminal layers where the graph must stop
outputs: output layers from which we start to populate the graph
add_missing_inputs: if True and `inputs` are provided, input nodes found that are not in given inputs
will be added to the graph. If False, a ValueError is raised with a list of the inputs that were not specified
(missing dependencies).
Returns:
graph (`Graph`): a graph from the outputs to the given input, or to every input found if these are not
specified.
"""
graph = Graph()
inputs = dict.fromkeys(as_list(inputs))
graph.outputs = as_list(outputs)
outputs = dict.fromkeys(as_list(outputs))
# add terminals to the graph
# for layer in input_layers:
# graph.add_node(layer)
for layer in outputs:
graph.add_node(layer)
dependencies = dict()
missing_dependencies = dict()
def add_dep(out, dep, target: dict):
if out not in target:
target[out] = set()
target[out].add(dep)
# arg order in a path to the output
arg_ord = {out: (0,) for out in outputs}
visited = set()
node_queue = list(zip(outputs, outputs))
while node_queue:
current_node, target_output = node_queue.pop(0)
if current_node not in visited:
next_nodes = current_node.inputs
if not next_nodes:
add_dep(target_output, current_node, dependencies)
if len(inputs) > 0 and current_node not in inputs and not add_missing_inputs:
add_dep(target_output, current_node, missing_dependencies)
else:
if current_node in inputs:
add_dep(target_output, current_node, dependencies)
else:
for i, input_node in enumerate(next_nodes):
graph.add_edge(input_node, current_node)
node_queue.append((input_node, target_output))
# record arg order
if input_node not in arg_ord:
arg_ord[input_node] = arg_ord[current_node] + (i + 1,)
visited.add(current_node)
if any(missing_dependencies) and not add_missing_inputs:
failed_str = []
for output_layer in missing_dependencies:
missing_str = "\n\t\t".join(map(str, missing_dependencies[output_layer]))
failed_str.append(f"\t{str(output_layer)}: \n\t\t{missing_str}")
failed_str = "\n".join(failed_str)
raise ValueError(f"output layers missing inputs: \n {failed_str}")
if inputs:
missing_from_graph = list(filter(lambda x: x not in graph.in_nodes, inputs))
if missing_from_graph:
input_str = "\n\t ".join(map(str, missing_from_graph))
output_str = "\n\t ".join(map(str, outputs))
raise ValueError(f"no path between the output layers:\n\t {output_str} \n and input layers: \n\t"
f"{input_str}")
inputs.update(graph.in_nodes)
outputs.update(graph.out_nodes)
# if no ordered input is given
# re-order by argument ordering
if not inputs:
# sort according to argument ordering from the outputs
sorted_inputs = sorted(inputs, key=lambda in_layer: arg_ord[in_layer], reverse=False)
inputs = dict.fromkeys(sorted_inputs)
graph.in_nodes = inputs
graph.out_nodes = outputs
    return graph
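The traversal above walks backwards from each output through `layer.inputs` until it reaches terminal nodes, recording edges and argument order along the way. A minimal, self-contained sketch of that idea with a hypothetical stub `Node` class (not part of the library):

```python
# Stand-alone sketch of the backwards traversal in `build`; `Node` and the
# node names below are hypothetical stand-ins for real layers.
from collections import deque

class Node:
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

a, b = Node("a"), Node("b")
hidden = Node("hidden", inputs=[a, b])
out = Node("out", inputs=[hidden])

edges_in, terminals, visited = {}, [], set()
queue = deque([out])
while queue:
    node = queue.popleft()
    if node in visited:
        continue
    visited.add(node)
    if not node.inputs:
        terminals.append(node)          # a graph input was found
    for dep in node.inputs:
        edges_in.setdefault(node, []).append(dep)
        queue.append(dep)

print([t.name for t in terminals])      # ['a', 'b']
```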
Python | def as_function(self, ord_inputs=None, ord_outputs=None, name="compiled_graph", compile=True):
""" compiles the graph into a tensorflow callable compiled graph
Converts the current graph into a function with a series of `layer.compute(*tensors)` calls
and uses `tf.function` to compile this function to a Tensorflow static graph if compile is `True`.
    The resulting function is a closure with access to the layer objects, so TensorFlow should be able to
    trace the computations for each layer's `compute` call.
    Another way to feed inputs to a graph is to use input layers and change their values. If the graph is created
    without inputs but the terminal input nodes are dynamic inputs, the execution of those layers is a read
    on their placeholder value, so you can change that value before calling the graph and the output will be
    correct.
    ```python
    input_layer1.value = in0
    input_layer2.value = in1
    outputs = graph()
    ```
    this adds a bit of overhead since we have to write to the variable
!!! bug "Dev Note"
* makes use of `dependency_iter` to create the computation calls such that when we call compute all the
inputs needed as dependencies are already available.
Args:
        ord_inputs (`List[Node]`): list of inputs that determines the order of the resulting function arguments
        ord_outputs (`List[Node]`): list of outputs used to determine the return order
        name (`str`): function name, must be a valid python function name
        compile (`bool`): if True, compiles the resulting function with `tf.function`, else returns a plain python function
Returns:
function (`Callable`): an optimized TensorFlow static graph as a callable function or a python function
"""
clean = lambda name_str: re.sub(r"\W|^(?=\d)", "_", name_str)
name = clean(name)
graph = self
if not graph.out_nodes:
raise ValueError("can't compile an empty graph")
ord_inputs = as_list(ord_inputs)
ord_outputs = as_list(ord_outputs)
ord_nodes = list(self.dependency_iter())
input_set: set = set(graph.in_nodes)
if ord_inputs and not input_set.issuperset(ord_inputs):
raise ValueError("all feedable_inputs must be part of the graph inputs")
output_set: set = set(graph.out_nodes)
if ord_outputs and len(output_set.difference(ord_outputs)) > 0:
raise ValueError("all outputs must be part of the graph outputs")
# if no input order is specified use the graph endpoint order
outputs = dict.fromkeys(ord_outputs) if ord_outputs else graph.out_nodes
# if we don't provide inputs it will just treat them as callables
inputs = dict.fromkeys(ord_inputs) if ord_inputs else [] # graph.in_nodes
# check if they are all dynamic inputs
# in py3.7 the dict is an ordered set if we convert it back to a list
node_index = list(range(len(graph.nodes)))
feedable_inputs = list(inputs)
node_map = {}
for in_layer in feedable_inputs:
layer_i = node_index.pop(0)
in_name = in_layer.name.replace('/', '__')
layer_name = f"{in_name}_{layer_i}"
node_map[in_layer] = layer_name
args_str = ", ".join(node_map.values())
def_str = f"def {name}({args_str}):\n"
other_str = []
# all other inputs that are not feedable
other_inputs = list(input_set.difference(feedable_inputs))
node_map.update({in_layer: f"{in_layer.name}_{node_index.pop(0)}" for in_layer in other_inputs})
# requires outer access to layers var
for x in other_inputs:
other_str.append(f"\t{node_map[x]} = layers[\"{node_map[x]}\"].compute()")
other_str = "\n".join(other_str) + "\n" if other_str else ""
# remove inputs
# node_map contains input_nodes at this point
for _ in range(len(node_map)):
ord_nodes.pop(0)
compute_str = []
for current_node in ord_nodes:
node_name = current_node.name.replace('/', '__')
node_map[current_node] = f"{node_name}_{node_index.pop(0)}"
node_name = node_map[current_node]
# when layers have the same layer repeated as input, this causes problems
# it's better to use the same input_layers as declared in the graph
# dict from keys creates an ordered set which is not what we want
# next_nodes = dict.fromkeys(graph.edges_in[current_node])
next_nodes = graph.edges_in[current_node]
in_args = ", ".join([node_map[node] for node in next_nodes])
compute_str.append(f"\t{node_name} = layers[\"{node_name}\"].compute({in_args})")
compute_str = "\n".join(compute_str)
return_str = "\n\treturn {output_str}\n".format(output_str=", ".join([node_map[out] for out in outputs]))
full_fn_str = def_str + other_str + compute_str + return_str
logger.log(logging.DEBUG, f"converted function:\n {'-' * 10}\n\n {full_fn_str} \n{'-' * 10}")
# layer map (for the closure above)
# we feed the locals so that layers gets available in the above function
layers = {v: k for k, v in node_map.items()}
exec(full_fn_str, locals())
fn = eval(name)
fn.__doc__ = f"""{name}\n```python\n{full_fn_str}\n```"""
if compile:
fn = tf.function(fn)
    return fn
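The code generation above boils down to assembling the function source as a string and `exec`-ing it with a `layers` map in scope. A stripped-down, hypothetical illustration of that pattern (plain callables stand in for layer objects, and an explicit namespace dict replaces the `locals()`/`eval` trick used by the method):

```python
# `layers` holds plain callables as stand-ins for layer objects with a `compute` method.
layers = {"dense_0": lambda x: x * 2, "out_1": lambda x: x + 1}

fn_str = (
    "def compiled(x):\n"
    "\tdense_0 = layers['dense_0'](x)\n"
    "\tout_1 = layers['out_1'](dense_0)\n"
    "\treturn out_1\n"
)
namespace = {"layers": layers}
exec(fn_str, namespace)                 # defines `compiled` inside `namespace`
compiled = namespace["compiled"]
print(compiled(3))                      # 7
```

Wrapping the generated function in `tf.function`, as the method does when `compile=True`, then lets TensorFlow trace a static graph from those calls.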
Python | def compute(self, *input_values):
""" computes the graph output values based on the given input values
Args:
        *input_values: input values with the same order as the graph inputs, or a dictionary mapping input
            layers to values.
    Returns:
        a tuple with the values for the corresponding graph outputs
"""
if len(input_values) == 1 and isinstance(input_values[0], dict):
input_dict = input_values[0]
missing = list(filter(lambda x: x not in self.in_nodes, input_dict.keys()))
if missing:
missing_str = '\n\t'.join([f"{str(x)}" for x in missing])
raise ValueError(f"inputs not found in graphs:\n"
f"\t{missing_str}")
ord_inputs = dict.fromkeys(list(self.in_nodes)[:len(input_dict)])
input_values = [as_tensor(input_dict[input_layer]) for input_layer in ord_inputs]
elif len(input_values) > len(self.in_nodes):
raise ValueError(f"too many inputs:\n"
f"\tgraph expects {len(self.in_nodes)} inputs\n"
f"\tinputs passed {len(input_values)}")
else:
input_values = [as_tensor(input_value) for input_value in input_values]
ord_inputs = dict.fromkeys(list(self.in_nodes)[:len(input_values)])
input_dict = dict(zip(ord_inputs.keys(), input_values))
other_inputs = set(self.in_nodes).difference(ord_inputs)
node_iter = self.dependency_iter()
result_cache = dict()
visited = set()
for node in node_iter:
if node in input_dict:
result_cache[node] = input_dict[node]
elif node in other_inputs:
result_cache[node] = node.compute()
else:
visited.add(node)
# get input_node result, clean cache when no more dependencies on the same input
def get_args(node):
args = []
ins = self.edges_in[node]
for in_node in ins:
res = result_cache[in_node]
priority, num_deps = node_iter[node]
node_iter[node] = (priority, num_deps - 1)
if num_deps - 1 == 0:
del result_cache[in_node]
args.append(res)
return args
args = get_args(node)
result_cache[node] = node.compute(*args)
# result_cache[node] = node(*args)
    return tuple(map(lambda x: result_cache[x], self.out_nodes))
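The cache bookkeeping above aims to drop intermediate results once nothing downstream still needs them. The general idea can be sketched in isolation with a simple consumer counter; the tiny "graph", counts, and values below are made up for illustration and this is not the exact library logic:

```python
# Generic sketch of freeing cached results once all consumers have read them.
consumers_left = {"a": 2, "b": 1}          # how many nodes still need each result
cache = {"a": 10, "b": 20}

def consume(name):
    value = cache[name]
    consumers_left[name] -= 1
    if consumers_left[name] == 0:
        del cache[name]                    # free the intermediate result
    return value

total = consume("a") + consume("b") + consume("a")
print(total, cache)                        # 40 {}
```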
Python | def as_tensor(x, dtype=None):
""" Converts to tensor and casts to a given type if possible
Args:
x: an input ``Tensor``.
        dtype: the type we wish to cast the input tensor to
Returns:
``Tensor``: a tensor with the given dtype
"""
if dtype is not None:
dtype = tf.dtypes.as_dtype(dtype)
if not isinstance(x, tf.SparseTensor):
x = tf.convert_to_tensor(x)
if dtype is not None:
if x.dtype != dtype:
x = tf.cast(x, dtype)
    return x
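Assuming the helper above is in scope and TensorFlow is installed, usage looks roughly like this; note that sparse tensors are only cast, never converted to dense:

```python
import tensorflow as tf

t = as_tensor([1, 2, 3], dtype=tf.float32)       # assumes `as_tensor` from above is importable
print(t.dtype)                                   # <dtype: 'float32'>

sp = tf.SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[2, 2])
sp = as_tensor(sp, dtype=tf.float32)             # stays a SparseTensor, values cast to float32
print(type(sp).__name__)                         # SparseTensor
```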
Python | def as_list(items):
""" Returns a list from one or multiple elements.
    if one element is passed, returns a list with that element;
    if a list, a tuple, or a dictionary of elements is passed,
    returns a list with the elements, or the keys if the input is a dict
Note: we exclude SparseTensorValue because it is a named tuple
and we want to feed the whole object as a single data sample if needed
Args:
items: one item, a tuple of elements or a list of elements
Returns:
a :obj:`list` with the elements in items
"""
if items is None:
items = []
elif isinstance(items, (list, tuple, dict)) and not isinstance(items, (
tf.compat.v1.SparseTensorValue, tf.SparseTensor)):
items = list(items)
else:
items = [items]
    return items
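A few illustrative calls, assuming `as_list` above is in scope:

```python
print(as_list(None))               # []
print(as_list(3))                  # [3]
print(as_list((1, 2)))             # [1, 2]
print(as_list({"a": 1, "b": 2}))   # ['a', 'b'] -- a dict yields its keys
```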
Python | def send_event(args):
""" make a dict with the data provided as args and turn it into JSON """
url = args.url
uri = args.uri
urluri = str(url) + str(uri)
datadict = {}
datadict['what'] = args.what
datadict['tags'] = args.tag
datadict['data'] = args.data
# convert the dict into JSON
jsondata = json.dumps(datadict).encode('utf8')
# https://stackoverflow.com/questions/31778800/how-can-i-make-a-post-request-on-python-with-urllib3
urllib3.disable_warnings()
http = urllib3.PoolManager()
http.request(
"POST",
urluri,
body=jsondata,
        headers={'Content-Type': 'application/json'})
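The function expects an argparse-style namespace with `url`, `uri`, `what`, `tag`, and `data` attributes. A hedged usage sketch with placeholder values (the endpoint is hypothetical):

```python
import argparse

args = argparse.Namespace(
    url="http://localhost:5000",    # placeholder event server
    uri="/events",
    what="deploy finished",
    tag=["release", "prod"],
    data="build 1234",
)
send_event(args)   # POSTs {"what": ..., "tags": [...], "data": ...} as JSON to url + uri
```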
Python | def conanBuildTypes() -> list:
'''
These are the conan build types that we allow.
:return: The list of types allowed.
'''
return ["Debug", "Release"] | def conanBuildTypes() -> list:
'''
These are the conan build types that we allow.
:return: The list of types allowed.
'''
return ["Debug", "Release"] |
Python | def copyFiles(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
'''
Copies files having a specified pattern within the source folder. This function exists because I had problems using
`self.copy` inside the conanfile.
:param pattern: The wildcard filename pattern. See the python function `fnmatch.fnmatch` for information on wildcards.
:param srcFolder: The source folder (i.e. where the files are copied from)
:param destFolder: The destination folder (where files are copied to).
:param keepPath: If true, the relative path underneath `srcFolder` will be preserved for the copy into `destFolder`.
If false, the files are copied directly under destFolder.
:return: The list of copied files relative to the `destFolder`.
'''
output = []
files = findFiles(pattern, srcFolder)
for file in files:
srcFile = os.path.join(srcFolder, file)
if keepPath:
destFile = file
else:
destFile = os.path.basename(file)
output.append(destFile)
destFile = os.path.join(destFolder, destFile)
parentFolder = os.path.dirname(destFile)
if parentFolder != None and len(parentFolder) > 0:
os.makedirs(parentFolder, exist_ok=True)
shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return output
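The wildcard semantics referenced in the docstring are those of `fnmatch`; for example:

```python
import fnmatch

print(fnmatch.fnmatch("libgstreamer-1.0.so", "libgst*.so"))    # True
print(fnmatch.fnmatch("libgstreamer-1.0.so.0", "libgst*.so"))  # False -- the suffix must match too
print(fnmatch.fnmatch("README.TXT", "*.txt"))                  # True on Windows, False on POSIX
```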
Python | def copyOneFile(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> str:
'''
    This is the same as `copyFiles` except it throws if the number of files copied is not exactly one.
'''
output = copyFiles(pattern, srcFolder, destFolder, keepPath)
if len(output) == 0:
raise Exception(f"Failed to find {pattern} within folder: {srcFolder}")
if len(output) > 1:
raise Exception(f"Found multiple {pattern} within folder: {srcFolder}")
    return output[0]
Python | def copyOneSharedObjectFileGroup(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
'''
The same as `copySharedObjectFileGroups` except there must be exactly one group, otherwise it throws.
'''
output = []
groups = findSharedObjectGroups(pattern, srcFolder)
if len(groups) == 0:
raise Exception(f"Failed to find {pattern} within folder: {srcFolder}")
if len(groups) > 1:
raise Exception(f"Found multiple {pattern} groups within folder: {srcFolder}")
for group in groups:
for file in group:
srcFile = os.path.join(srcFolder, file)
if keepPath:
destFile = file
else:
destFile = os.path.basename(file)
output.append(destFile)
destFile = os.path.join(destFolder, destFile)
parentFolder = os.path.dirname(destFile)
if parentFolder != None and len(parentFolder) > 0:
os.makedirs(parentFolder, exist_ok=True)
shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return output
Python | def copySharedObjectFileGroups(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
'''
Each *.so file can have multiple companions with a version number appended after the `.so` suffix. The companions
can be files or links to files. For example:
libwhatever.so
libwhatever.so.0 [symlink --> libwhatever.so.0.1234.0]
libwhatever.so.0.1234.0
libwhatever.so.1 [symlink --> libwhatever.so.1.4321.0]
libwhatever.so.1.4321.0
    This method finds any filename whose prefix (before the `.so`) matches the given pattern. For each match, the `*.so`
    file is copied with all of its companions.
:param pattern: The wildcard filename pattern for the prefix of the file (before the `.so`). This should not include
the `.so` or anything that comes after. See the python function `fnmatch.fnmatch` for information on wildcards.
:param srcFolder: The source folder (i.e. where the files are copied from)
:param destFolder: The destination folder (where files are copied to).
:param keepPath: If true, the relative path underneath `srcFolder` will be preserved for the copy into `destFolder`.
If false, the files are copied directly under destFolder.
:return: The list of copied files relative to the `destFolder`.
'''
output = []
groups = findSharedObjectGroups(pattern, srcFolder)
for group in groups:
        for file in group:
srcFile = os.path.join(srcFolder, file)
if keepPath:
destFile = file
else:
destFile = os.path.basename(file)
output.append(destFile)
destFile = os.path.join(destFolder, destFile)
parentFolder = os.path.dirname(destFile)
if parentFolder != None and len(parentFolder) > 0:
os.makedirs(parentFolder, exist_ok=True)
shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return output
Python | def dockerfileChoices() -> list:
'''
The list of choices for docker containers which can be used to build the conan packages. This is basically
the list of folders under `gst-conan/distros` (where each folder contains a dockerfile).
    :return: The sorted list of distro folder names under `gst-conan/distros`, each of which contains a Dockerfile.
'''
distrosFolder = base.gstConanDistrosFolder()
output = list(set(glob.glob(distrosFolder + "/*/Dockerfile")))
for i, dockerfile in enumerate(output):
output[i] = os.path.basename(os.path.dirname(dockerfile))
output.sort()
    return output
Python | def doConanPackage(conanfile:ConanFile, packageInfo:configuration.PackageInfo, buildOutputFolder:str) -> None:
'''
    This is typically called from the conanfile's `package` function. This method executes most of the
    logic around copying build output, but it does not copy header files. The caller must do that.
    :param conanfile: The conanfile at the time when the `package` function is being called.
:param packageInfo: The package information.
:param buildOutputFolder: The folder where build output can be found.
:return: Nothing.
'''
try:
#if conanfile.settings.os == "Windows":
# extExe = ".exe"
# extLib = ".lib"
# extSo = ".dll"
#elif conanfile.settings.os == "Linux":
# extExe = ""
# extLib = ".a"
# extSo = ".so"
#else:
# raise Exception("Unsupported OS: " + str(conanfile.settings.os))
extExe = ""
extLib = ".a"
extSo = ".so"
# Copy executables to 'bin' folder
for exe in packageInfo.executables:
copyOneFile(f"{exe}{extExe}",
srcFolder=buildOutputFolder,
destFolder=os.path.join(conanfile.package_folder, "bin"),
keepPath=False)
# Copy static libs to 'lib' folder
for lib in packageInfo.staticlibs:
copyOneFile(f"{lib}{extLib}",
srcFolder=buildOutputFolder,
destFolder=os.path.join(conanfile.package_folder, "lib"),
keepPath=False)
# Copy plugins to 'plugins' folder
if packageInfo.plugins:
for pluginName, pluginInfo in packageInfo.plugins.items():
if pluginInfo.get("optional"):
doPlugin = eval(f"conanfile.options.{pluginName}")
else:
doPlugin = True
if doPlugin:
lib = pluginInfo.get("lib")
if lib:
lib = f"{lib}"
else:
lib = f"libgst{pluginName}"
destFolder = os.path.join(conanfile.package_folder, "plugins")
try:
if conanfile.settings.os == "Linux":
copyOneSharedObjectFileGroup(lib, buildOutputFolder, destFolder, keepPath=False)
else:
copyOneFile(f"{lib}{extSo}", buildOutputFolder, destFolder, keepPath=False)
except Exception:
conanfile.output.error(f"Failed to find the file {lib}{extSo}.")
conanfile.output.error(f"You may need to install some packages on your machine.")
conanfile.output.error(f"Look for machine setup instructions: https://github.com/Panopto/gst-conan")
                        innerException = sys.exc_info()[1]  # the exception instance, not just its type
raise Exception(f"Failed to find the file {lib}{extSo}.") from innerException
# Start a list of sharedlibs to be copied.
if packageInfo.sharedlibs:
sharedlibs = packageInfo.sharedlibs.copy()
else:
sharedlibs = []
# Run through pkg-config files
if packageInfo.pkgconfigs:
srcPcFolder = os.path.join(buildOutputFolder, "pkgconfig")
destGirFolder = os.path.join(conanfile.package_folder, "data", "gir-1.0")
destPcFolder = os.path.join(conanfile.package_folder, "pc-installed")
destTypelibFolder = os.path.join(conanfile.package_folder, "lib", "girepository-1.0")
os.makedirs(destPcFolder)
for pcName, pcInfo in packageInfo.pkgconfigs.items():
lib = pcInfo.get("lib")
if lib != None:
sharedlibs.append(lib)
gir = pcInfo.get("gir")
if gir != None:
copyOneFile(f"{gir}.gir", buildOutputFolder, destGirFolder, keepPath=False)
copyOneFile(f"{gir}.typelib", buildOutputFolder, destTypelibFolder, keepPath=False)
# Copy the original pkg-config file
shutil.copy2(src=os.path.join( srcPcFolder, f"{pcName}.pc"),
dst=os.path.join(destPcFolder, f"{pcName}.pc"))
# Load the pkg-config file, modify, and save
pcFile = PkgConfigFile()
pcFile.load(os.path.join(srcPcFolder, f"{pcName}.pc"))
pcFile.variables["prefix"] = conanfile.package_folder
pcFile.variables["exec_prefix"] = "${prefix}"
pcFile.variables["libdir"] = "${prefix}/lib"
pcFile.variables["includedir"] = "${prefix}/include"
if pcFile.variables.get("pluginsdir"):
pcFile.variables["pluginsdir"] = "${prefix}/plugins"
if pcFile.variables.get("toolsdir"):
pcFile.variables["toolsdir"] = "${prefix}/bin"
if pcFile.variables.get("datarootdir"):
pcFile.variables["datarootdir"] = "${prefix}/data"
if pcFile.variables.get("datadir"):
pcFile.variables["datadir"] = "${prefix}/data"
if pcFile.variables.get("girdir"):
pcFile.variables["girdir"] = "${prefix}/data/gir-1.0"
if pcFile.variables.get("typelibdir"):
pcFile.variables["typelibdir"] = "${libdir}/girepository-1.0"
# This is where conan's cmake generator expects the *.pc files to be.
pcFile.save(os.path.join(conanfile.package_folder, f"{pcName}.pc"))
# Copy shared libs to 'lib' folder
for lib in sharedlibs:
if conanfile.settings.os == "Linux":
copyOneSharedObjectFileGroup(lib,
srcFolder=buildOutputFolder,
destFolder=os.path.join(conanfile.package_folder, "lib"),
keepPath=False)
else:
copyOneFile(f"{lib}{extSo}",
srcFolder=buildOutputFolder,
destFolder=os.path.join(conanfile.package_folder, "lib"),
keepPath=False)
except:
conanfile.output.error(traceback.format_exc())
        raise
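The `PkgConfigFile` class is not shown in this excerpt; the rewrite it performs amounts to overriding the path variables of a `.pc` file so they point into the conan package folder. A rough, hypothetical stand-in for that step (real `.pc` parsing is more involved):

```python
# Hypothetical minimal stand-in for the PkgConfigFile variable rewrite.
# Only handles plain `name=value` lines, unlike the real class.
def rewrite_pc_prefix(pc_text: str, package_folder: str) -> str:
    overrides = {
        "prefix": package_folder,
        "exec_prefix": "${prefix}",
        "libdir": "${prefix}/lib",
        "includedir": "${prefix}/include",
    }
    lines = []
    for line in pc_text.splitlines():
        key, sep, _ = line.partition("=")
        if sep and key.strip() in overrides:
            line = f"{key.strip()}={overrides[key.strip()]}"
        lines.append(line)
    return "\n".join(lines)

print(rewrite_pc_prefix("prefix=/usr\nlibdir=${prefix}/lib64\nName: gstreamer-1.0",
                        "/home/user/.conan/data/pkg"))
```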
Python | def doConanPackageInfo(conanfile:ConanFile, packageInfo:configuration.PackageInfo) -> None:
'''
    This is typically called from the conanfile's `package_info` function. This method executes
    all of the logic around attaching user_info and cpp_info to the conan package.
    :param conanfile: The conanfile at the time when the `package_info` function is being called.
    :param packageInfo: The package information.
:return: Nothing.
'''
try:
conanfile.cpp_info.bindirs = ["bin"]
conanfile.cpp_info.includedirs = ["include"]
conanfile.cpp_info.libdirs = ["lib"]
#if conanfile.settings.os == "Windows":
# extSo = ".dll"
# extLib = ".lib"
#elif conanfile.settings.os == "Linux":
# extSo = ".so"
# extLib = ".a"
#else:
# raise Exception(f"Unsupported OS: {conanfile.settings.os}")
extSo = ".so"
extLib = ".a"
conanfile.cpp_info.libs = []
for pcName, pcInfo in packageInfo.pkgconfigs.items():
lib = pcInfo.get("lib")
if lib != None:
conanfile.cpp_info.libs.append(f"{lib}{extSo}")
for lib in packageInfo.sharedlibs:
conanfile.cpp_info.libs.append(f"{lib}{extSo}")
for lib in packageInfo.staticlibs:
conanfile.cpp_info.libs.append(f"{lib}{extLib}")
if packageInfo.plugins and len(packageInfo.plugins) > 0:
conanfile.user_info.plugins = os.path.join(conanfile.cpp_info.rootpath, "plugins")
except:
conanfile.output.error(traceback.format_exc())
        raise
Python | def findFiles(pattern:str, folder:str, recursive:bool=True, prefix=None) -> list:
'''
    Find files within the given folder that match the given pattern.
:param pattern: The pattern to find. See the python function: `fnmatch.fnmatch`
:param folder: The folder inside which to search. Results will be relative to this folder.
:param recursive: If true, sub-folders are searched.
:param prefix: The superordinate path that is joined (prepended) to each element of the output.
:return: The list of files discovered.
'''
output = []
for item in os.listdir(folder):
path = os.path.join(folder, item)
if os.path.isfile(path):
if fnmatch.fnmatch(item, pattern):
if prefix == None:
output.append(item)
else:
output.append(os.path.join(prefix, item))
elif os.path.isdir(path) and recursive:
subPrefix = item
if prefix != None:
subPrefix = os.path.join(prefix, subPrefix)
output += findFiles(pattern, path, recursive=True, prefix=subPrefix)
    return output
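A quick self-contained check of the recursive behaviour, assuming `findFiles` above is importable:

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "sub"))
    for rel in ("a.txt", "b.log", os.path.join("sub", "c.txt")):
        open(os.path.join(root, rel), "w").close()
    print(sorted(findFiles("*.txt", root)))   # ['a.txt', 'sub/c.txt'] on POSIX
```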
Python | def findSharedObjectGroups(pattern:str, folder:str, recursive:bool=True, prefix=None) -> list:
'''
Find a set of shared objects from the given folder having the given pattern.
Each *.so file can have multiple companions with a version number appended after the `.so` suffix. The companions
can be files or links to files. For example:
libwhatever.so
libwhatever.so.0 [symlink --> libwhatever.so.0.1234.0]
libwhatever.so.0.1234.0
libwhatever.so.1 [symlink --> libwhatever.so.1.4321.0]
libwhatever.so.1.4321.0
The companions of each *.so file are grouped such that the members of each group share a common character sequence
prior to ".so".
    This method finds any filename whose prefix (before the `.so`) matches the given pattern, and groups each `*.so`
    file with all of its companions.
:param pattern: The wildcard filename pattern for the prefix of the file (before the `.so`). This should not include
the `.so` or anything that comes after. See the python function `fnmatch.fnmatch` for information on wildcards.
:param folder: The folder inside which to search. Results will be relative to this folder.
:param recursive: If true, sub-folders are searched.
:param prefix: The superordinate path that is joined (prepended) to each element of the output.
:return: The list of lists. Each inner list represents a single set of *.so files.
'''
output = []
pattern0 = pattern + ".so"
pattern1 = pattern + ".so.*"
    rePattern = re.compile(r'.+\.so\.[0-9.]*\d+$')
files = []
for item in os.listdir(folder):
path = os.path.join(folder, item)
if os.path.isfile(path):
if fnmatch.fnmatch(item, pattern0) \
or (fnmatch.fnmatch(item, pattern1) and (None != rePattern.fullmatch(item))):
files.append(item)
elif os.path.isdir(path) and recursive:
subPrefix = item
if prefix != None:
subPrefix = os.path.join(prefix, subPrefix)
output += findSharedObjectGroups(pattern, path, recursive=True, prefix=subPrefix)
if len(files) > 0:
files.sort()
fileGroups = groupSoFiles(files)
if prefix != None:
for i, fileSet in enumerate(fileGroups):
for j, file in enumerate(fileSet):
fileSet[j] = os.path.join(prefix, file)
                # `fileSet` is the same list object as `fileGroups[i]`, so the in-place
                # updates above already apply to `fileGroups`; no reassignment is needed.
output += fileGroups
    return output
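The version-suffix regex accepts names like `libfoo.so.0.1234.0` but rejects a bare `libfoo.so` (that case is handled by the first `fnmatch` pattern) and anything with a non-numeric suffix:

```python
import re

rePattern = re.compile(r'.+\.so\.[0-9.]*\d+$')
for name in ("libfoo.so", "libfoo.so.0", "libfoo.so.0.1234.0", "libfoo.so.backup"):
    print(name, bool(rePattern.fullmatch(name)))
# libfoo.so False
# libfoo.so.0 True
# libfoo.so.0.1234.0 True
# libfoo.so.backup False
```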
Python | def groupSoFiles(sortedList:list) -> list:
'''
Groups a list of *.so files into sets.
Each *.so file can have multiple companions with a version number appended after the `.so` suffix. The companions
can be files or links to files. For example:
libwhatever.so [symlink --> libwhatever.so.0]
libwhatever.so.0 [symlink --> libwhatever.so.0.1234.0]
libwhatever.so.0.1234.0
libwhatever.so.1 [symlink --> libwhatever.so.1.4321.0]
libwhatever.so.1.4321.0
The companions of each *.so file are grouped such that the members of each group share a common character sequence
prior to ".so".
:param sortedList: A list of filenames which has been sorted alphabetically.
:return: A list of lists. Each inner list is a single grouping of *.so files.
'''
output = []
thisGroup = []
thisPrefix = None
for item in sortedList:
if thisPrefix == None:
idx = item.find(".so")
thisPrefix = item[:idx]
thisGroup = [item]
elif item.startswith(thisPrefix):
thisGroup.append(item)
else:
output.append(thisGroup)
idx = item.find(".so")
thisPrefix = item[:idx]
thisGroup = [item]
if len(thisGroup) > 0:
output.append(thisGroup)
    return output
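Assuming `groupSoFiles` above is in scope, the docstring's example list (plus one unrelated library) is grouped like this:

```python
files = sorted([
    "libwhatever.so",
    "libwhatever.so.0",
    "libwhatever.so.0.1234.0",
    "libwhatever.so.1",
    "libwhatever.so.1.4321.0",
    "libother.so",
])
print(groupSoFiles(files))
# [['libother.so'],
#  ['libwhatever.so', 'libwhatever.so.0', 'libwhatever.so.0.1234.0',
#   'libwhatever.so.1', 'libwhatever.so.1.4321.0']]
```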
Python | def createWithoutDocker(packagesFolder:str, revision:str, version:str,
build_type:str, user:str, channel:str, extraArgs:list) -> None:
'''
Implements `conan create` for all packages when the command is used without the "--docker" flag.
Throws on error.
:param packagesFolder: The folder which contains the conanfiles for all packages.
:param revision: The revision to pull from all Gstreamer repos. This can be a branch name, a sha, or a tag.
:param version: The version of Gstreamer being packaged, and part of the conan package id.
:param build_type: The conan build_type setting ("Debug" or "Release").
:param user: The user which is part of the conan package id.
:param channel: The channel which is part of the conan package id.
:param extraArgs: A list of extra arguments to be passed to conan over the command line.
:return: Nothing.
'''
config = configuration.getCurrent()
# Extra args to be appended to the end of the `conan create ` command
xargs = ""
    if extraArgs != None and len(extraArgs) > 0:
xargs = subprocess.list2cmdline(extraArgs)
env = os.environ.copy()
env['GST_CONAN_REVISION'] = revision
for packageName, packageInfo in config.packages.items():
packageFolder = os.path.join(packagesFolder, packageName)
cmd = f"conan create {packageFolder} {packageName}/{version}@{user}/{channel} --build missing -s build_type={build_type} {xargs}"
        base.execute(cmd, env=env)
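For a concrete idea of what gets executed, the loop above produces one `conan create` command per package; with hypothetical values it looks like this:

```python
# hypothetical values, shown only to illustrate the command shape
packageName, version, user, channel = "gstreamer", "1.14.4", "myuser", "testing"
packageFolder, build_type, xargs = "packages/gstreamer", "Release", ""
cmd = (f"conan create {packageFolder} {packageName}/{version}@{user}/{channel} "
       f"--build missing -s build_type={build_type} {xargs}")
print(cmd)
# conan create packages/gstreamer gstreamer/1.14.4@myuser/testing --build missing -s build_type=Release
```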
Python | def read_fastcgi_record(input):
"""reads the main fast cgi record"""
data = input.read(8) # read record
if not data:
# no more data, our other process must have died...
raise _ExitException()
content_size = ord(data[4]) << 8 | ord(data[5])
content = input.read(content_size) # read content
input.read(ord(data[6])) # read padding
if ord(data[0]) != FCGI_VERSION_1:
raise Exception('Unknown fastcgi version ' + str(data[0]))
req_id = (ord(data[2]) << 8) | ord(data[3])
reqtype = ord(data[1])
processor = REQUEST_PROCESSORS.get(reqtype)
if processor is None:
# unknown type requested, send response
send_response(req_id, FCGI_UNKNOWN_TYPE, data[1] + '\0' * 7)
return None
    return processor(req_id, content)
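The 8 bytes read above are the standard FastCGI record header: version, type, request id (2 bytes), content length (2 bytes), padding length, and a reserved byte. The original code targets Python 2 and indexes string bytes with `ord`; the same field extraction can be sketched in Python 3 with `struct`:

```python
import struct

# FCGI_GET_VALUES (type 9), request id 1, 4 bytes of content, 2 bytes of padding
header = bytes([1, 9, 0, 1, 0, 4, 2, 0])
version, rtype, req_id, content_len, padding, _ = struct.unpack(">BBHHBB", header)
print(version, rtype, req_id, content_len, padding)   # 1 9 1 4 2
```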
Python | def read_fastcgi_begin_request(req_id, content):
"""reads the begin request body and updates our
_REQUESTS table to include the new request"""
# typedef struct {
# unsigned char roleB1;
# unsigned char roleB0;
# unsigned char flags;
# unsigned char reserved[5];
# } FCGI_BeginRequestBody;
# TODO: Ignore request if it exists
res = FastCgiRecord(
FCGI_BEGIN_REQUEST,
req_id,
(ord(content[0]) << 8) | ord(content[1]), # role
ord(content[2]), # flags
)
    _REQUESTS[req_id] = res
Python | def read_fastcgi_keyvalue_pairs(content, offset):
"""Reads a FastCGI key/value pair stream"""
name_len = ord(content[offset])
if (name_len & 0x80) != 0:
name_full_len = chr(name_len & ~0x80) + content[offset + 1:offset+4]
name_len = int_struct.unpack(name_full_len)[0]
offset += 4
else:
offset += 1
value_len = ord(content[offset])
if (value_len & 0x80) != 0:
value_full_len = chr(value_len & ~0x80) + content[offset+1:offset+4]
value_len = int_struct.unpack(value_full_len)[0]
offset += 4
else:
offset += 1
name = content[offset:offset+name_len]
offset += name_len
value = content[offset:offset+value_len]
offset += value_len
    return offset, name, value
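Lengths in the FastCGI name-value stream use a single byte when the value fits in 7 bits, and four bytes with the high bit of the first byte set otherwise. A Python 3 sketch of that decoding (the module-level `int_struct` is not shown in this excerpt; a big-endian 32-bit `struct.Struct` is assumed here):

```python
import struct

def decode_length(buf: bytes, offset: int):
    """Decode one FastCGI name/value length field; returns (length, new_offset)."""
    first = buf[offset]
    if first & 0x80:
        # 4-byte form: clear the high bit of the first byte, then read a big-endian uint32
        length = struct.unpack(">I", bytes([first & 0x7F]) + buf[offset + 1:offset + 4])[0]
        return length, offset + 4
    return first, offset + 1

print(decode_length(b"\x05", 0))               # (5, 1)
print(decode_length(b"\x80\x00\x01\x00", 0))   # (256, 4)
```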
Python | def write_name_len(io, name):
"""Writes the length of a single name for a key or value in
a key/value stream"""
if len(name) <= 0x7f:
io.write(chr(len(name)))
else:
        io.write(int_struct.pack(len(name)))
Python | def write_fastcgi_keyvalue_pairs(pairs):
"""creates a FastCGI key/value stream and returns it as a string"""
res = cStringIO.StringIO()
for key, value in pairs.iteritems():
write_name_len(res, key)
write_name_len(res, value)
res.write(key)
res.write(value)
    return res.getvalue()
Python | def read_fastcgi_input(req_id, content):
"""reads FastCGI std-in and stores it in wsgi.input passed in the
wsgi environment array"""
res = _REQUESTS[req_id].params
if 'wsgi.input' not in res:
res['wsgi.input'] = content
else:
res['wsgi.input'] += content
if not content:
# we've hit the end of the input stream, time to process input...
return _REQUESTS[req_id] | def read_fastcgi_input(req_id, content):
"""reads FastCGI std-in and stores it in wsgi.input passed in the
wsgi environment array"""
res = _REQUESTS[req_id].params
if 'wsgi.input' not in res:
res['wsgi.input'] = content
else:
res['wsgi.input'] += content
if not content:
# we've hit the end of the input stream, time to process input...
return _REQUESTS[req_id] |
Python | def read_fastcgi_data(req_id, content):
"""reads FastCGI data stream and publishes it as wsgi.data"""
res = _REQUESTS[req_id].params
if 'wsgi.data' not in res:
res['wsgi.data'] = content
else:
res['wsgi.data'] += content | def read_fastcgi_data(req_id, content):
"""reads FastCGI data stream and publishes it as wsgi.data"""
res = _REQUESTS[req_id].params
if 'wsgi.data' not in res:
res['wsgi.data'] = content
else:
res['wsgi.data'] += content |
Python | def read_fastcgi_get_values(req_id, content):
"""reads the fastcgi request to get parameter values, and immediately
responds"""
offset = 0
request = {}
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
request[name] = value
response = {}
if FCGI_MAX_CONNS in request:
response[FCGI_MAX_CONNS] = '1'
if FCGI_MAX_REQS in request:
response[FCGI_MAX_REQS] = '1'
if FCGI_MPXS_CONNS in request:
response[FCGI_MPXS_CONNS] = '0'
send_response(req_id, FCGI_GET_VALUES_RESULT,
write_fastcgi_keyvalue_pairs(response)) | def read_fastcgi_get_values(req_id, content):
"""reads the fastcgi request to get parameter values, and immediately
responds"""
offset = 0
request = {}
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
request[name] = value
response = {}
if FCGI_MAX_CONNS in request:
response[FCGI_MAX_CONNS] = '1'
if FCGI_MAX_REQS in request:
response[FCGI_MAX_REQS] = '1'
if FCGI_MPXS_CONNS in request:
response[FCGI_MPXS_CONNS] = '0'
send_response(req_id, FCGI_GET_VALUES_RESULT,
write_fastcgi_keyvalue_pairs(response)) |
Python | def log(txt):
"""Logs fatal errors to a log file if WSGI_LOG env var is defined"""
log_file = os.environ.get('WSGI_LOG')
if log_file:
f = file(log_file, 'a+')
try:
f.write(str(datetime.datetime.now()))
f.write(': ')
f.write(txt)
finally:
f.close() | def log(txt):
"""Logs fatal errors to a log file if WSGI_LOG env var is defined"""
log_file = os.environ.get('WSGI_LOG')
if log_file:
f = file(log_file, 'a+')
try:
f.write(str(datetime.datetime.now()))
f.write(': ')
f.write(txt)
finally:
f.close() |
Python | def send_response(id, resp_type, content, streaming = True):
"""sends a response w/ the given id, type, and content to the server.
If the content is streaming then an empty record is sent at the end to
terminate the stream"""
offset = 0
while 1:
if id < 256:
id_0 = 0
id_1 = id
else:
id_0 = id >> 8
id_1 = id & 0xff
# content len, padding len, content
len_remaining = len(content) - offset
if len_remaining > 65535:
len_0 = 0xff
len_1 = 0xff
content_str = content[offset:offset+65535]
offset += 65535
else:
len_0 = len_remaining >> 8
len_1 = len_remaining & 0xff
content_str = content[offset:]
offset += len_remaining
data = '%c%c%c%c%c%c%c%c%s' % (
FCGI_VERSION_1, # version
resp_type, # type
id_0, # requestIdB1
id_1, # requestIdB0
len_0, # contentLengthB1
len_1, # contentLengthB0
0, # paddingLength
0, # reserved
content_str)
os.write(stdout, data)
if len_remaining == 0 or not streaming:
break
sys.stdin.flush() | def send_response(id, resp_type, content, streaming = True):
"""sends a response w/ the given id, type, and content to the server.
If the content is streaming then an empty record is sent at the end to
terminate the stream"""
offset = 0
while 1:
if id < 256:
id_0 = 0
id_1 = id
else:
id_0 = id >> 8
id_1 = id & 0xff
# content len, padding len, content
len_remaining = len(content) - offset
if len_remaining > 65535:
len_0 = 0xff
len_1 = 0xff
content_str = content[offset:offset+65535]
offset += 65535
else:
len_0 = len_remaining >> 8
len_1 = len_remaining & 0xff
content_str = content[offset:]
offset += len_remaining
data = '%c%c%c%c%c%c%c%c%s' % (
FCGI_VERSION_1, # version
resp_type, # type
id_0, # requestIdB1
id_1, # requestIdB0
len_0, # contentLengthB1
len_1, # contentLengthB0
0, # paddingLength
0, # reserved
content_str)
os.write(stdout, data)
if len_remaining == 0 or not streaming:
break
sys.stdin.flush() |
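Every record written above starts with the same fixed 8-byte header (version, type, two-byte request id, two-byte content length, padding length, reserved), and bodies longer than 65535 bytes are split across records. Here is a small Python 3 sketch of that header layout using struct in place of the '%c' formatting above; FCGI_STDOUT = 6 is the record type from the FastCGI spec.

```python
import struct

FCGI_VERSION_1 = 1
FCGI_STDOUT = 6   # record type from the FastCGI spec

def make_record(req_id, rec_type, content):
    # !BBHHBB == version, type, requestId, contentLength, paddingLength, reserved
    header = struct.pack('!BBHHBB', FCGI_VERSION_1, rec_type,
                         req_id, len(content), 0, 0)
    return header + content

rec = make_record(1, FCGI_STDOUT, b'Hello')
assert rec[:2] == b'\x01\x06'        # version 1, type 6
assert rec[2:4] == b'\x00\x01'       # request id 1, big-endian
assert rec[4:6] == b'\x00\x05'       # content length 5
```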
Python | def Pl(self, site, neighbors, l):
'''
Computes the power spectrum of a neighbor environment
using Steinhardt Bond Order Parameters
Args:
site: a pymatgen periodic site
neighbors: a list of pymatgen periodic sites
corresponding to the origin site's
nearest neighbors
l: free integer parameter
Returns:
pl: the power spectrum value pl of a periodic site, float
'''
# the closed set of integers [-l,l]
mvals = self._mvalues(l)
# complex vector of all qlm values
qlms = bop._qlm(site, neighbors, l, mvals)
# scalar product of complex vector
dot = bop._scalar_product(qlms, qlms)
# steinhardt bond order parameter ql
ql = bop._ql(dot, l)
# compute Power spectrum element
Pl = (2*l + 1) / (4 * np.pi) * ql**2
return Pl | def Pl(self, site, neighbors, l):
'''
Computes the power spectrum of a neighbor environment
using Steinhardt Bond Order Parameters
Args:
site: a pymatgen periodic site
neighbors: a list of pymatgen periodic sites
corresponding to the origin site's
nearest neighbors
l: free integer parameter
Returns:
pl: the power spectrum value pl of a periodic site, float
'''
# the closed set of integers [-l,l]
mvals = self._mvalues(l)
# complex vector of all qlm values
qlms = bop._qlm(site, neighbors, l, mvals)
# scalar product of complex vector
dot = bop._scalar_product(qlms, qlms)
# steinhardt bond order parameter ql
ql = bop._ql(dot, l)
# compute Power spectrum element
Pl = (2*l + 1) / (4 * np.pi) * ql**2
return Pl |
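For reference, the quantity returned above is the conventional Steinhardt power spectrum component. Assuming the usual definitions of $q_{lm}$ and $q_l$ in the helper routines (not shown here), the relation implemented in the last two lines is

$$
P_l \;=\; \frac{2l+1}{4\pi}\,q_l^{2},
\qquad
q_l \;=\; \sqrt{\frac{4\pi}{2l+1}\sum_{m=-l}^{l}\lvert q_{lm}\rvert^{2}},
\qquad\text{so}\qquad
P_l \;=\; \sum_{m=-l}^{l}\lvert q_{lm}\rvert^{2}.
$$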
Python | def mean(self):
'''
Calculates the mean of a 2-D array along a specified axis
'''
return np.mean(self.data, axis=self._axis) | def mean(self):
'''
Calculates the mean of a 2-D array along a specified axis
'''
return np.mean(self.data, axis=self._axis) |
Python | def min(self):
'''
Calculates the minimum value of an array along a specified axis
'''
return np.amin(self.data, axis=self._axis) | def min(self):
'''
Calculates the minimum value of an array along a specified axis
'''
return np.amin(self.data, axis=self._axis) |
Python | def max(self):
'''
Calculates the maximum value of an array along a specified axis
'''
return np.amax(self.data, axis=self._axis) | def max(self):
'''
Calculates the maximum value of an array along a specified axis
'''
return np.amax(self.data, axis=self._axis) |
Python | def standard_deviation(self):
'''
Calculates the standard deviation of a 2-D array along a specified axis
if the array length is 1, return 0 for standard deviation
this fix is to ensure that no NaN values affect the ML models
'''
if len(self.data) == 1:
return 0
else:
return np.std(self.data, axis=self._axis) | def standard_deviation(self):
'''
Calculates the standard deviation of a 2-D array along a specified axis
if the array length is 1, return 0 for standard deviation
this fix is to ensure that no NaN values affect the ML models
'''
if len(self.data) == 1:
return 0
else:
return np.std(self.data, axis=self._axis) |
Python | def kurtosis(self):
'''
Calculates the kurtosis of a 2-D array
'''
return kurtosis(self.data, axis=self._axis) | def kurtosis(self):
'''
Calculates the kurtosis of a 2-D array
'''
return kurtosis(self.data, axis=self._axis) |
Python | def skewness(self):
'''
Calculates the skewness of a 2-D array
'''
return skew(self.data, axis=self._axis) | def skewness(self):
'''
Calculates the skewness of a 2-D array
'''
return skew(self.data, axis=self._axis) |
Python | def covariance(self, comparison_data):
'''
Computes the covariance of two feature arrays
If the feature arrays are not of equal shape,
the shorter feature array will be padded with zeros
such that they are then equal length.
Note that the covariance matrix is symmetric, thus we only
need the upper triangular portion of the matrix
Args:
comparison data: np.float, the arrays to compute the covariance matrix over
'''
if type(comparison_data) != np.ndarray:
comparison_data = np.array(comparison_data)
if len(np.shape(comparison_data)) > 1:
comparison_data = comparison_data
else:
if np.shape(comparison_data) == ():
comparison_data = np.array([comparison_data, comparison_data])
comparison_data = comparison_data[:, np.newaxis]
if (np.shape(self.data) == np.array([1,1])).all() and (np.shape(comparison_data) == np.array([1,1])).all():
print('Covariance not defined for scalars')
raise ValueError
elif np.shape(self.data) == np.shape(comparison_data):
# covariance matrix
cov_mat = np.cov(self.data, comparison_data, rowvar=False)
# flatten upper triangular covariance matrix
return cov_mat[0,1]
elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:
# pad comparison vector with zeros
new_array = np.zeros_like(self.data)
new_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
# covariance matrix
cov_mat = np.cov(self.data, new_array, rowvar=False)
# flatten the upper triangular covariance matrix
return cov_mat[0,1]
elif np.shape(self.data)[0] <= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:
# pad self.data with necessary zeros
new_data_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])
new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
# pad comparison data with necessary zeroes
new_comparison_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])
new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)
return cov_mat[0,1]
elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] <= np.shape(comparison_data)[1]:
# pad with necessary zeros
new_data_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])
new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
new_comparison_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])
new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)
return cov_mat[0,1]
else:
# pad self.data with zeros
new_array = np.zeros_like(comparison_data)
new_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
# covariance matrix
cov_mat = np.cov(new_array, comparison_data, rowvar=False)
# flatten the upper triangular covariance matrix
return cov_mat[0,1] | def covariance(self, comparison_data):
'''
Computes the covariance of two feature arrays
If the feature arrays are not of equal shape,
the shorter feature array will be padded with zeros
such that they are then equal length.
Note that the covariance matrix is symmetric, thus we only
need the upper triangular portion of the matrix
Args:
comparison data: np.float, the arrays to compute the covariance matrix over
'''
if type(comparison_data) != np.ndarray:
comparison_data = np.array(comparison_data)
if len(np.shape(comparison_data)) > 1:
comparison_data = comparison_data
else:
if np.shape(comparison_data) == ():
comparison_data = np.array([comparison_data, comparison_data])
comparison_data = comparison_data[:, np.newaxis]
if (np.shape(self.data) == np.array([1,1])).all() and (np.shape(comparison_data) == np.array([1,1])).all():
print('Covariance not defined for scalars')
raise ValueError
elif np.shape(self.data) == np.shape(comparison_data):
# covariance matrix
cov_mat = np.cov(self.data, comparison_data, rowvar=False)
# flatten upper triangular covariance matrix
return cov_mat[0,1]
elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:
# pad comparison vector with zeros
new_array = np.zeros_like(self.data)
new_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
# covariance matrix
cov_mat = np.cov(self.data, new_array, rowvar=False)
# flatten the upper triangular covariance matrix
return cov_mat[0,1]
elif np.shape(self.data)[0] <= np.shape(comparison_data)[0] and np.shape(self.data)[1] >= np.shape(comparison_data)[1]:
# pad self.data with necessary zeros
new_data_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])
new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
# pad comparison data with necessary zeroes
new_comparison_array = np.zeros([np.shape(comparison_data)[0], np.shape(self.data)[1]])
new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)
return cov_mat[0,1]
elif np.shape(self.data)[0] >= np.shape(comparison_data)[0] and np.shape(self.data)[1] <= np.shape(comparison_data)[1]:
# pad with necessary zeros
new_data_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])
new_data_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
new_comparison_array = np.zeros([np.shape(self.data)[0], np.shape(comparison_data)[1]])
new_comparison_array[:np.shape(comparison_data)[0], :np.shape(comparison_data)[1]] = comparison_data
cov_mat = np.cov(new_data_array, new_comparison_array, rowvar=False)
return cov_mat[0,1]
else:
# pad self.data with zeros
new_array = np.zeros_like(comparison_data)
new_array[:np.shape(self.data)[0], :np.shape(self.data)[1]] = self.data
# covariance matrix
cov_mat = np.cov(new_array, comparison_data, rowvar=False)
# flatten the upper triangular covariance matrix
return cov_mat[0,1] |
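All of the branches above reduce to the same operation: pad the shorter array with zeros to a common shape, stack the two as columns for np.cov with rowvar=False, and read the off-diagonal entry. A small standalone check of that behavior with toy numbers (not taken from the module):

```python
import numpy as np

a = np.array([[1.0], [2.0], [3.0], [4.0]])   # stands in for self.data (n x 1)
b = np.array([[2.0], [4.0]])                 # shorter comparison array

padded = np.zeros_like(a)
padded[:b.shape[0], :b.shape[1]] = b         # zero-pad b up to a's shape

# rowvar=False treats each column as a variable, so [0, 1] is cov(a, padded b)
cov = np.cov(a, padded, rowvar=False)[0, 1]
print(round(cov, 3))                         # -1.667 for these numbers
```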
Python | def cosine_cutoff(r, rc, derivative=False):
'''
Only the radial cutoff function is implemented thus far.
ensures that the cutoff function goes smoothly to zero at the cutoff value
args:
r: corresponds to the radial component of a spherical/hyperspherical vector valued function
double
rc: cutoff radius, double
'''
if r > rc:
return 0
else:
if derivative is True:
return -np.pi / rc / 2 * np.sin(np.pi * r / rc)
else:
return 0.5 * (np.cos(np.pi * r / rc) + 1.) | def cosine_cutoff(r, rc, derivative=False):
'''
Only the radial cutoff function is implemented thus far.
ensures that the cutoff function goes smoothly to zero at the cutoff value
args:
r: corresponds to the radial component of a spherical/hyperspherical vector valued function
double
rc: cutoff radius, double
'''
if r > rc:
return 0
else:
if derivative is True:
return -np.pi / rc / 2 * np.sin(np.pi * r / rc)
else:
return 0.5 * (np.cos(np.pi * r / rc) + 1.) |
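A quick numerical sanity check of the cutoff (1 at r = 0, 0 at and beyond r = rc) and of the analytic derivative branch against a central finite difference, assuming the cosine_cutoff defined above is in scope:

```python
rc = 5.0
assert cosine_cutoff(0.0, rc) == 1.0          # full weight at the origin
assert cosine_cutoff(rc + 0.1, rc) == 0       # zero outside the cutoff
assert abs(cosine_cutoff(rc, rc)) < 1e-12     # smooth zero at the cutoff

r, h = 2.3, 1e-6                              # derivative vs. finite difference
fd = (cosine_cutoff(r + h, rc) - cosine_cutoff(r - h, rc)) / (2 * h)
assert abs(cosine_cutoff(r, rc, derivative=True) - fd) < 1e-6
```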
Python | def populate_cg_array(j_max, in_arr):
'''
Populate a 5-D array with the Clebsch-Gordan coupling coefficients
given a j max value, this is intended for specific use with the bispectrum
The intent is to use this at the highest level of execution to minimize computation
We map the Clebsch-Gordan arguments to array indices using the following relations
CG(J1, M1, J2, M2, J, M)
Jn -> Jn/2
Mn -> Mn - Jn/2
args:
j_max: specify the degree of bispectrum calculation
in_arr: supplied 5-D array to insert CG coefficients
'''
# initiate a 5-d array with size 2*j_max+1
twojmax = 2 * j_max
size = twojmax + 1
cgs = in_arr
# index j1 and j2 from 0 to 2jmax
for j1 in range(size):
for j2 in range(j1 + 1):
# there exists a symmetry for j given by this array
js = np.arange(np.abs(j1 - j2), min(twojmax, j1 + j2) + 1, 2)
for j in js:
# index the sum from 0 to j1
for m1 in range(j1 + 1):
aa2 = 2 * m1 - j1
# index the sum from 0 to j2
for m2 in range(j2 + 1):
bb2 = 2 * m2 - j2
# aa2 and bb2 ensure that m1 and m2 = m in the CG calc
m = (aa2 + bb2 + j) / 2
'''
restrict z-angular momentum to be a positive value
not larger than total angular momentum
'''
if m < 0 or m > j:
continue
# convert array arguments to CG args
J1 = j1 / 2
J2 = j2 / 2
J = j / 2
M1 = m1 - J1
M2 = m2 - J2
M = m - J
# add CG coef to cgs array
cgs[j1, j2, j, m1, m2] = CG(J1, M1, J2, M2, J, M) | def populate_cg_array(j_max, in_arr):
'''
Populate a 5-D array with the Clebsch-Gordan coupling coefficients
given a j max value, this is intended for specific use with the bispectrum
The intent is to use this at the highest level of execution to minimize computation
We map the Clebsch-Gordan arguments to array indices using the following relations
CG(J1, M1, J2, M2, J, M)
Jn -> Jn/2
Mn -> Mn - Jn/2
args:
j_max: specify the degree of bispectrum calculation
in_arr: supplied 5-D array to insert CG coefficients
'''
# initiate a 5-d array with size 2*j_max+1
twojmax = 2 * j_max
size = twojmax + 1
cgs = in_arr
# index j1 and j2 from 0 to 2jmax
for j1 in range(size):
for j2 in range(j1 + 1):
# there exists a symmetry for j given by this array
js = np.arange(np.abs(j1 - j2), min(twojmax, j1 + j2) + 1, 2)
for j in js:
# index the sum from 0 to j1
for m1 in range(j1 + 1):
aa2 = 2 * m1 - j1
# index the sum from 0 to j2
for m2 in range(j2 + 1):
bb2 = 2 * m2 - j2
# aa2 and bb2 ensure that m1 and m2 = m in the CG calc
m = (aa2 + bb2 + j) / 2
'''
restrict z-angular momentum to be a positive value
not larger than total angular momentum
'''
if m < 0 or m > j:
continue
# convert array arguments to CG args
J1 = j1 / 2
J2 = j2 / 2
J = j / 2
M1 = m1 - J1
M2 = m2 - J2
M = m - J
# add CG coef to cgs array
cgs[j1, j2, j, m1, m2] = CG(J1, M1, J2, M2, J, M) |
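A sketch of allocating and filling the coefficient table for j_max = 2 (Python 3). It assumes populate_cg_array and a CG(J1, M1, J2, M2, J, M) helper share one scope; sympy's clebsch_gordan is used here purely as a stand-in for the module's own CG routine.

```python
import numpy as np
from fractions import Fraction
from sympy import Rational
from sympy.physics.wigner import clebsch_gordan

def CG(J1, M1, J2, M2, J, M):
    # stand-in for the module's CG; sympy's argument order is (j1, j2, j3, m1, m2, m3)
    rat = lambda x: Rational(Fraction(x).limit_denominator(4))
    return float(clebsch_gordan(rat(J1), rat(J2), rat(J), rat(M1), rat(M2), rat(M)))

j_max = 2
size = 2 * j_max + 1
cg_table = np.zeros((size,) * 5)
populate_cg_array(j_max, cg_table)            # fills cg_table in place
print(round(cg_table[2, 2, 4, 1, 1], 3))      # <1 0; 1 0 | 2 0> = sqrt(2/3) ~ 0.816
```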
Python | def U(j, m, m_prime, psi, theta, phi):
'''
Computes the 4-D hyperspherical harmonic given the three angular coordinates
and indices
args:
j: free integer parameter, used to index arrays, corresponds to free half integral/
integral constants used for calculation, int
m, mp: free integer parameters, used to index arrays, correspond to free half integral/
integral constants used for calculation, int
returns:
the 4-D hyperspherical harmonic (wigner_U) function, complex
'''
j = j / 2
m = m - j
m_prime = m_prime - j
sph_harm = 0. + 0.j
mvals = np.arange(-j, j + 1, 1)
for mpp in mvals:
sph_harm += wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
return sph_harm.conjugate() | def U(j, m, m_prime, psi, theta, phi):
'''
Computes the 4-D hyperspherical harmonic given the three angular coordinates
and indices
args:
j: free integer parameter, used to index arrays, corresponds to free half integral/
integral constants used for calculation, int
m, mp: free integer parameters, used to index arrays, correspond to free half integral/
integral constants used for calculation, int
returns:
the 4-D hyperspherical harmonic (wigner_U) function, complex
'''
j = j / 2
m = m - j
m_prime = m_prime - j
sph_harm = 0. + 0.j
mvals = np.arange(-j, j + 1, 1)
for mpp in mvals:
sph_harm += wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
return sph_harm.conjugate() |
Python | def dUdr(j, m, m_prime, psi, theta, phi, dpsidr, dthetadr):
'''
Computes the derivative with respect to r of the 4-D hyperspherical harmonic
the hyperspherical harmonic is dependent on r as theta and psi are dependent on r
hence this derivative is derived from the chain rule
'''
j = j / 2
m = m - j
m_prime = m_prime - j
mvals = np.arange(-j, j + 1, 1)
dUdtheta = 0. + 0.j
dUdpsi = 0. + 0.j
for mpp in mvals:
dUdtheta += wigner_D(j, m, mpp, phi, theta, -phi, True) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
dUdtheta -= wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, True)
dUdpsi += mpp * wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
dUdpsi *= -1j
dUdr = dUdpsi * dpsidr + dUdtheta * dthetadr
return dUdr.conjugate() | def dUdr(j, m, m_prime, psi, theta, phi, dpsidr, dthetadr):
'''
Computes the derivative with respect to r of the 4-D hyperspherical harmonic
the hyperspherical harmonic is dependent on r as theta and psi are dependent on r
hence this derivative is derived from the chain rule
'''
j = j / 2
m = m - j
m_prime = m_prime - j
mvals = np.arange(-j, j + 1, 1)
dUdtheta = 0. + 0.j
dUdpsi = 0. + 0.j
for mpp in mvals:
dUdtheta += wigner_D(j, m, mpp, phi, theta, -phi, True) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
dUdtheta -= wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, True)
dUdpsi += mpp * wigner_D(j, m, mpp, phi, theta, -phi, False) * \
np.exp(-1j * mpp * psi) * \
wigner_D(j, mpp, m_prime, phi, -theta, -phi, False)
dUdpsi *= -1j
dUdr = dUdpsi * dpsidr + dUdtheta * dthetadr
return dUdr.conjugate() |
Python | def compute_C(j, m, mp, hypersphere_coords, rbf_vals, cutoff_vals):
'''
Computes the harmonic expansion coefficients for a function on the 3 sphere
Args:
j, m, mp: indices for 4-D hyperspherical harmonics, int
returns:
expansion coefficients, complex
'''
dot = U(j, m, mp, 0, 0, 0)
for i in range(len(hypersphere_coords)):
psi = hypersphere_coords[i, 0]
theta = hypersphere_coords[i, 1]
phi = hypersphere_coords[i, 2]
harmonic = U(j, m, mp, psi, theta, phi)
dot += harmonic * rbf_vals[i] * cutoff_vals[i]
return dot | def compute_C(j, m, mp, hypersphere_coords, rbf_vals, cutoff_vals):
'''
Computes the harmonic expansion coefficients for a function on the 3 sphere
Args:
j, m, mp: indices for 4-D hyperspherical harmonics, int
returns:
expansion coefficients, complex
'''
dot = U(j, m, mp, 0, 0, 0)
for i in range(len(hypersphere_coords)):
psi = hypersphere_coords[i, 0]
theta = hypersphere_coords[i, 1]
phi = hypersphere_coords[i, 2]
harmonic = U(j, m, mp, psi, theta, phi)
dot += harmonic * rbf_vals[i] * cutoff_vals[i]
return dot |
Python | def dCdr(
j,
m,
mp,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs):
'''
derivative of expansion coefficients with respect to r
'''
deriv = 0. + 0.j
for i in range(len(hypersphere_coords)):
psi = hypersphere_coords[i, 0]
theta = hypersphere_coords[i, 1]
if abs(theta) < 0.01 or abs(theta-np.pi/2) < 0.01 or abs(theta-np.pi) < 0.01:
continue
phi = hypersphere_coords[i, 2]
dpsi = dpsis[i]
dtheta = dthetas[i]
dharmonic = dUdr(j, m, mp, psi, theta, phi, dpsi, dtheta)
harmonic = U(j, m, mp, psi, theta, phi)
deriv += (dharmonic * cutoff_vals[i] +
harmonic * dcutoffs[i]) * rbf_vals[i]
return deriv | def dCdr(
j,
m,
mp,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs):
'''
derivative of expansion coefficients with respect to r
'''
deriv = 0. + 0.j
for i in range(len(hypersphere_coords)):
psi = hypersphere_coords[i, 0]
theta = hypersphere_coords[i, 1]
if abs(theta) < 0.01 or abs(theta-np.pi/2) < 0.01 or abs(theta-np.pi) < 0.01:
continue
phi = hypersphere_coords[i, 2]
dpsi = dpsis[i]
dtheta = dthetas[i]
dharmonic = dUdr(j, m, mp, psi, theta, phi, dpsi, dtheta)
harmonic = U(j, m, mp, psi, theta, phi)
deriv += (dharmonic * cutoff_vals[i] +
harmonic * dcutoffs[i]) * rbf_vals[i]
return deriv |
Python | def populate_C_array(jmax, in_arr, hypersphere_coords, rbf_vals, cutoff_vals):
'''
Populates the array of the expansion coefficients from compute_C
args:
jmax: degree of bispectrum calculation, int
in_arr: reference to array to populate, complex, 3-D
hypersphere_coords: 2-D array of psis, thetas, phis from 4-d spherical coordinate system, float
rbf_vals: 1-D array of function values on the 3 sphere, float
cutoff_vals: 1-D array of cutoff function values on the 3 sphere, float
note that these arrays should be the same length
We map the inner product arguments to array indices using the following relations
J -> J/2
Mn -> Mn - J/2
'''
# array size
twojmax = 2 * jmax
size = twojmax + 1
# reference to input array
cs = in_arr
for j in range(size):
ms = np.arange(0, j + 1, 1)
for ma in ms:
for mb in ms:
cs[j, mb, ma] = compute_C(
j, mb, ma, hypersphere_coords, rbf_vals, cutoff_vals) | def populate_C_array(jmax, in_arr, hypersphere_coords, rbf_vals, cutoff_vals):
'''
Populates the array of the expansion coefficients from compute_C
args:
jmax: degree of bispectrum calculation, int
in_arr: reference to array to populate, complex, 3-D
hypersphere_coords: 2-D array of psis, thetas, phis from 4-d spherical coordinate system, float
rbf_vals: 1-D array of function values on the 3 sphere, float
cutoff_vals: 1-D array of cutoff function values on the 3 sphere, float
note that these arrays should be the same length
We map the inner product arguments to array indices using the following relations
J -> J/2
Mn -> Mn - J/2
'''
# array size
twojmax = 2 * jmax
size = twojmax + 1
# reference to input array
cs = in_arr
for j in range(size):
ms = np.arange(0, j + 1, 1)
for ma in ms:
for mb in ms:
cs[j, mb, ma] = compute_C(
j, mb, ma, hypersphere_coords, rbf_vals, cutoff_vals) |
Python | def populate_dCdr_array(
jmax,
in_arr,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs):
'''
populates a preallocated array with the derivatives of the expansion coefficients
'''
twojmax = 2 * jmax
size = twojmax + 1
dcs = in_arr
for j in range(size):
ms = np.arange(0, j + 1, 1)
for ma in ms:
for mb in ms:
dcs[j,
mb,
ma] = dCdr(j,
mb,
ma,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs) | def populate_dCdr_array(
jmax,
in_arr,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs):
'''
populates a preallocated array with the derivatives of the expansion coefficients
'''
twojmax = 2 * jmax
size = twojmax + 1
dcs = in_arr
for j in range(size):
ms = np.arange(0, j + 1, 1)
for ma in ms:
for mb in ms:
dcs[j,
mb,
ma] = dCdr(j,
mb,
ma,
hypersphere_coords,
rbf_vals,
cutoff_vals,
dpsis,
dthetas,
dcutoffs) |
Python | def compute_bispectrum(jmax, cs, zs, in_arr):
'''
Computes the bispectrum given pre computed C arrays and Z arrays
args:
jmax: degree of bispectrum, int
cs: 3D array of precomputed inner products of
hyperspherical harmonics, function,
and cutoff function, complex
zs: 5-D array of pre computed sums, complex
'''
twojmax = 2 * jmax
size = twojmax + 1
bis = in_arr
for j1 in range(size):
for j2 in range(j1 + 1):
js = np.arange(np.abs(j1 - j2), min(twojmax, j1 + j2) + 1, 2)
for j in js:
if j1 > j:
continue
mb = 0
while 2 * mb <= j:
for ma in range(j + 1):
c = cs[int(j), int(ma), int(mb)]
z = zs[int(j1), int(j2), int(j), int(ma), int(mb)]
bis[int(j1), int(j2), int(j)] += c.conjugate() * z
mb += 1 | def compute_bispectrum(jmax, cs, zs, in_arr):
'''
Computes the bispectrum given pre computed C arrays and Z arrays
args:
jmax: degree of bispectrum, int
cs: 3D array of precomputed inner products of
hyperspherical harmonics, function,
and cutoff function, complex
zs: 5-D array of pre computed sums, complex
'''
twojmax = 2 * jmax
size = twojmax + 1
bis = in_arr
for j1 in range(size):
for j2 in range(j1 + 1):
js = np.arange(np.abs(j1 - j2), min(twojmax, j1 + j2) + 1, 2)
for j in js:
if j1 > j:
continue
mb = 0
while 2 * mb <= j:
for ma in range(j + 1):
c = cs[int(j), int(ma), int(mb)]
z = zs[int(j1), int(j2), int(j), int(ma), int(mb)]
bis[int(j1), int(j2), int(j)] += c.conjugate() * z
mb += 1 |
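Shape bookkeeping for the routines above: every index runs over 2*jmax + 1 values, and only the triangle j1 >= j2, |j1 - j2| <= j <= j1 + j2 is visited. Below is a hedged driver sketch; the routine that fills zs is not shown in this section, so that step is left as a comment.

```python
import numpy as np

jmax = 1
size = 2 * jmax + 1
cs = np.zeros((size, size, size), dtype=np.complex128)    # expansion coefficients
zs = np.zeros((size,) * 5, dtype=np.complex128)           # precomputed sums (filled elsewhere)
bis = np.zeros((size, size, size), dtype=np.complex128)   # bispectrum components

# populate_C_array(jmax, cs, hypersphere_coords, rbf_vals, cutoff_vals)
# ... fill zs from cs and the Clebsch-Gordan table ...
compute_bispectrum(jmax, cs, zs, bis)                      # accumulates into bis in place
print(bis[0, 0, 0])                                        # 0j for these all-zero inputs
```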
Python | def plot_correlation(self, figname=None, figsize=(12,8)):
"""
Plot the correlation between prediction and target values.
"""
plt.figure(figsize=figsize)
plt.scatter(self.y_test, self.Y_test, c='green', label='test')
plt.scatter(self.y_train, self.Y_train, c='blue', label='train')
plt.title('{0:d} materials, r$^2$ = {1:.4f}, Algo: {2:s}'.format(
len(self.prop), self.r2, self.algo))
plt.xlabel('Prediction')
plt.ylabel(self.tag['prop'])
plt.legend()
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() | def plot_correlation(self, figname=None, figsize=(12,8)):
"""
Plot the correlation between prediction and target values.
"""
plt.figure(figsize=figsize)
plt.scatter(self.y_test, self.Y_test, c='green', label='test')
plt.scatter(self.y_train, self.Y_train, c='blue', label='train')
plt.title('{0:d} materials, r$^2$ = {1:.4f}, Algo: {2:s}'.format(
len(self.prop), self.r2, self.algo))
plt.xlabel('Prediction')
plt.ylabel(self.tag['prop'])
plt.legend()
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() |
Python | def plot_distribution(self, figname=None, figsize=(12,8)):
"""
some other plots to facilitate analysis of the results
"""
plt.figure(figsize=figsize)
plt.hist(self.Y, bins = 100)
plt.xlabel(self.tag['prop'])
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() | def plot_distribution(self, figname=None, figsize=(12,8)):
"""
some other plots to facilitate analysis of the results
"""
plt.figure(figsize=figsize)
plt.hist(self.Y, bins = 100)
plt.xlabel(self.tag['prop'])
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() |
Python | def _create_RDF_table(self):
'''
Creates a dictionary with tuples corresponding to array indices
for all possible pairwise element combinations, then populate
the initial PRDF array as zeros.
'''
self._prdf_indeces = {}
elements = []
for element in ele_data.keys():
elements.append(str(element))
arr_len = int(self._R_max / self._R_bin)
# all possible pairwise combinations without repeated entries
combs = itertools.combinations_with_replacement(elements, 2)
'''populate every possible element pairwise combination with each
combination in alphabetical order '''
index = 0
for comb in combs:
'''the if and else conditions ensure that the elements are in
alphabetical order
the pairwise element combinations (e.g. Bi-Te) are used as keys to
access a dictionary of tuples corresponding to the indices of the
PRDF array corresponding to that combination's distribution function'''
if comb[0] <= comb[1]:
self._prdf_indeces[comb[0]+'-'+comb[1]
] = (index * arr_len, index * arr_len + arr_len)
else:
self._prdf_indeces[comb[1]+'-'+comb[0]
] = (index * arr_len, index * arr_len + arr_len)
index += 1
self.PRDF = np.zeros(arr_len * index) | def _create_RDF_table(self):
'''
Creates a dictionary with tuples corresponding to array indices
for all possible pairwise element combinations, then populate
the initial PRDF array as zeros.
'''
self._prdf_indeces = {}
elements = []
for element in ele_data.keys():
elements.append(str(element))
arr_len = int(self._R_max / self._R_bin)
# all possible pairwise combinations without repeated entries
combs = itertools.combinations_with_replacement(elements, 2)
'''populate every possible element pairwise combination with each
combination in alphabetical order '''
index = 0
for comb in combs:
'''the if and else conditions ensure that the elements are in
alphabetical order
the pairwise element combinations (e.g. Bi-Te) are used as keys to
access a dictionary of tuples corresponding to the indices of the
PRDF array corresponding to that combination's distribution function'''
if comb[0] <= comb[1]:
self._prdf_indeces[comb[0]+'-'+comb[1]
] = (index * arr_len, index * arr_len + arr_len)
else:
self._prdf_indeces[comb[1]+'-'+comb[0]
] = (index * arr_len, index * arr_len + arr_len)
index += 1
self.PRDF = np.zeros(arr_len * index) |
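The bookkeeping above maps every alphabetically ordered element pair to a half-open slice of one long flat PRDF vector. A standalone illustration of the same mapping with a made-up three-element set instead of the full periodic table:

```python
import itertools

elements = ['Bi', 'Se', 'Te']
arr_len = 4                                    # R_max / R_bin bins per pair
indices = {}
for i, (a, b) in enumerate(itertools.combinations_with_replacement(elements, 2)):
    key = '-'.join(sorted((a, b)))             # alphabetical key, e.g. 'Bi-Te'
    indices[key] = (i * arr_len, i * arr_len + arr_len)

print(indices['Bi-Te'])                        # (8, 12): third pair, bins 8..11
print(len(indices) * arr_len)                  # 24 = 6 pairs * 4 bins each
```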
Python | def _compute_PRDF(self):
'''
Compute the pairwise radial distribution functions of all
possible combinations of constituent elements in the given crystal
structure
'''
# get all neighbors up to R_max
neighbors = self._crystal.get_all_neighbors(self._R_max)
'''convert elements list from element objects to strings for indexing
the prdf dictionary'''
elements = [str(ele) for ele in self._elements]
'''Populate a dictionary of empty lists with keys given by all
possible element combinations in the crystal structure
This dictionary will be used to store pairwise distances'''
distances = {}
combs = itertools.combinations_with_replacement(elements, 2)
for comb in combs:
'''conditions ensure that dictionary keys are ordered
alphabetically according to the element symbols'''
if comb[0] <= comb[1]:
distances[comb[0]+'-'+comb[1]] = []
else:
distances[comb[1]+'-'+comb[0]] = []
'''populate the pairwise distance dictionary using the element
species at the origin site and all neighbor sites
the distances are called from the 2nd element of the tuple
neighbors[i][j][1]
the first element in the tuple is the site information'''
for i, site in enumerate(self._crystal):
ele_1 = self._crystal[i].species_string
for j, neighbor in enumerate(neighbors[i]):
ele_2 = neighbors[i][j][0].species_string
'''again the conditions ensure that the element combinations
are ordered alphabetically'''
if ele_1 <= ele_2:
comb = ele_1+'-'+ele_2
distances[comb].append(neighbors[i][j][1])
else:
comb = ele_2+'-'+ele_1
distances[comb].append(neighbors[i][j][1])
# distance bins used for the pairwise RDF
bins = np.arange(0, self._R_max+self._R_bin, self._R_bin)
'''compute the shell volume using the first and last element
of the distance bins'''
shell_volume = 4/3 * np.pi * (np.power(bins[1:], 3) -
np.power(bins[:-1], 3))
# compute the site density using pymatgen structure attributes
site_density = self._crystal.num_sites / self._crystal.volume
# length of neighbors array (the number of atoms in the primitive cell)
neighbors_length = len(neighbors)
'''populate the prdf_dict with the pairwise rdfs associated with the
distance information in the distance dictionary'''
for comb in distances.keys():
'''use numpy's histogram function to find RDF'''
# only compute the RDF if the list is nonempty
if len(distances[comb]) == 0:
self.ErrorMsg.append('{0} is empty in {1}, perhaps need to increase R_max'.format(
comb, self._crystal.formula))
continue
hist, _ = np.histogram(distances[comb], bins, density=False)
# RDF = counts / (volume * site density * sites in primitive cell)
rdf = (hist / shell_volume / site_density / neighbors_length)
# call the indices corresponding to the element combination
index_1, index_2 = self._prdf_indeces[comb]
# populate the corresponding array slice with the PRDF
self.PRDF[index_1:index_2] = rdf | def _compute_PRDF(self):
'''
Compute the pairwise radial distribution functions of all
possible combinations of constituent elements in the given crystal
structure
'''
# get all neighbors up to R_max
neighbors = self._crystal.get_all_neighbors(self._R_max)
'''convert elements list from element objects to strings for indexing
the prdf dictionary'''
elements = [str(ele) for ele in self._elements]
'''Populate a dictionary of empty lists with keys given by all
possible element combinations in the crystal structure
This dictionary will be used to store pairwise distances'''
distances = {}
combs = itertools.combinations_with_replacement(elements, 2)
for comb in combs:
'''conditions ensure that dictionary keys are ordered
alphabetically according to the element symbols'''
if comb[0] <= comb[1]:
distances[comb[0]+'-'+comb[1]] = []
else:
distances[comb[1]+'-'+comb[0]] = []
'''populate the pairwise distance dictionary using the element
species at the origin site and all neighbor sites
the distances are called from the 2nd element of the tuple
neighbors[i][j][1]
the first element in the tuple is the site information'''
for i, site in enumerate(self._crystal):
ele_1 = self._crystal[i].species_string
for j, neighbor in enumerate(neighbors[i]):
ele_2 = neighbors[i][j][0].species_string
'''again the conditions ensure that the element combinations
are ordered alphabetically'''
if ele_1 <= ele_2:
comb = ele_1+'-'+ele_2
distances[comb].append(neighbors[i][j][1])
else:
comb = ele_2+'-'+ele_1
distances[comb].append(neighbors[i][j][1])
# distance bins used for the pairwise RDF
bins = np.arange(0, self._R_max+self._R_bin, self._R_bin)
'''compute the shell volume using the first and last element
of the distance bins'''
shell_volume = 4/3 * np.pi * (np.power(bins[1:], 3) -
np.power(bins[:-1], 3))
# compute the site density using pymatgen structure attributes
site_density = self._crystal.num_sites / self._crystal.volume
# length of neighbors array (the number of atoms in the primitive cell)
neighbors_length = len(neighbors)
'''populate the prdf_dict with the pairwise rdfs associated with the
distance information in the distance dictionary'''
for comb in distances.keys():
'''use numpy's histogram function to find RDF'''
# only compute the RDF if the list is nonempty
if len(distances[comb]) == 0:
self.ErrorMsg.append('{0} is empty in {1}, perhaps need to increase R_max'.format(
comb, self._crystal.formula))
continue
hist, _ = np.histogram(distances[comb], bins, density=False)
# RDF = counts / (volume * site density * sites in primitive cell)
rdf = (hist / shell_volume / site_density / neighbors_length)
# call the indices corresponding to the element combination
index_1, index_2 = self._prdf_indeces[comb]
# populate the corresponding array slice with the PRDF
self.PRDF[index_1:index_2] = rdf |
Python | def smear(data, sigma):
"""
Apply Gaussian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function
"""
diff = [data[0, i + 1] - data[0, i] for i in range(np.shape(data)[1] - 1)]
avg_x_per_step = np.sum(diff) / len(diff)
data[1, :] = gaussian_filter1d(data[1, :], sigma / avg_x_per_step)
return data | def smear(data, sigma):
"""
Apply Gaussian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function
"""
diff = [data[0, i + 1] - data[0, i] for i in range(np.shape(data)[1] - 1)]
avg_x_per_step = np.sum(diff) / len(diff)
data[1, :] = gaussian_filter1d(data[1, :], sigma / avg_x_per_step)
return data |
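Usage sketch: broadening a sharp two-peak spectrum stored as a 2 x N array (row 0 is the x grid, row 1 the intensities), which is the layout the function above assumes. It presumes smear and its scipy import are available in the current scope.

```python
import numpy as np

x = np.linspace(0.0, 10.0, 501)               # uniform grid, spacing 0.02
y = np.zeros_like(x)
y[100] = 1.0                                  # two sharp peaks
y[350] = 0.5
spectrum = np.vstack((x, y))

smeared = smear(spectrum.copy(), sigma=0.1)   # peaks broadened in place
print(abs(smeared[1].sum() - 1.5) < 1e-6)     # True: total intensity is preserved
```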
Python | def compute_RDF(self, crystal):
"""
Computes the radial distribution function of a given crystal.
Args:
self: RDF
crystal: Crystal structure information
Returns: None
"""
R_max = self.R_max
R_bin = self.R_bin
# below is the old code before vectorization
# R: the array which contains the number of occurrences of atomic pairs
# in each [dmin, dmax].
# rij_dot: the atomic coordinates of supercell in cartesian format
# rij_dist: the distance matrix between atoms in the big cell and in
# the small cell
# the idea is
# 1, to loop over all atoms in the small cell
# 2, calculate rij_dist
# 3, for each distance bin [dmin, dmax], count the occurrences of
# distances
# R = np.zeros(round(R_max/R_bin))
# rij_dot = self.find_supercell(crystal, R_max)
# for atom in crystal.frac_coords:
# origin = [np.dot(atom, crystal.lattice.matrix)]
# rij_dist = cdist(rij_dot, origin)
# for i in range(len(R)):
# d_min, d_max = R_bin*(i+0.5), R_bin*(i+1.5)
# R[i] += len([x for x in rij_dist if d_min<=x<d_max])
# vectorized version:
# Vectorizing code refers to operations that are performed
# on multiple components of a vector from a single statement
# apply_along_axis applies a function across the dimension of an array
# in essence, it is an optimized for loop.
# see find supercell method
rij_dot = self.find_supercell(crystal, R_max)
length = round(R_max/R_bin) # length of distance array
# create minimum distance vector and add dimension
d_min = np.arange(0.5, length+0.5, 1)
# create maximum distance vector and add dimension
d_max = np.arange(1.5, length+1.5, 1)
# stack the min and max distance vectors into array
d = np.vstack((d_min, d_max))*R_bin
def compute_rij_dist(atom):
"""
Computes the distance between atoms in the unit cell and atoms in
the supercell
Args:
atom = Fractional coordinates of the atom within the unit cell
Returns: the euclidean distance between atoms in the unit cell and
atoms in the supercell
"""
# dot product of atomic fractional coordinates and lattice matrix
origin = np.dot(atom, crystal.lattice.matrix)
origin = origin[np.newaxis, :] # add dimension to array
return cdist(rij_dot, origin)
# loop over fractional coordinates of each atom in the crystal to
# compute an array of euclidean distances
rij_dist = np.apply_along_axis(compute_rij_dist, axis=1,
arr=crystal.frac_coords)
def compute_R(span):
"""
Counts the euclidean distances within a bin range
Args:
span = An ordered pair of min and max distances for the bin
Returns: a count of distances within the bin range
"""
return ((span[0] <= rij_dist) & (rij_dist < span[1])).sum()
# R: the array which contains the number of occurrences of atomic pairs
# in each [dmin, dmax].
R = np.apply_along_axis(compute_R, axis=0, arr=d)
# radii in angstrom
r = np.arange(1, length+1, 1)*R_bin
# now calculate RDF based on the equation *** (reference from the book)
rho = len(crystal.frac_coords)/crystal.volume
R = R/(4*np.pi*R_bin*rho**2*crystal.volume) * np.power(r, -2)
self.RDF = np.vstack((r, R))
return | def compute_RDF(self, crystal):
"""
Computes the radial distribution function of a given crystal.
Args:
self: RDF
crystal: Crystal structure information
Returns: None
"""
R_max = self.R_max
R_bin = self.R_bin
# below is the old code before vectorization
# R: the array which contains the number of occurrences of atomic pairs
# in each [dmin, dmax].
# rij_dot: the atomic coordinates of supercell in cartesian format
# rij_dist: the distance matrix between atoms in the big cell and in
# the small cell
# the idea is
# 1, to loop over all atoms in the small cell
# 2, calculate rij_dist
# 3, for each distance bin [dmin, dmax], count the occurrences of
# distances
# R = np.zeros(round(R_max/R_bin))
# rij_dot = self.find_supercell(crystal, R_max)
# for atom in crystal.frac_coords:
# origin = [np.dot(atom, crystal.lattice.matrix)]
# rij_dist = cdist(rij_dot, origin)
# for i in range(len(R)):
# d_min, d_max = R_bin*(i+0.5), R_bin*(i+1.5)
# R[i] += len([x for x in rij_dist if d_min<=x<d_max])
# vectorized version:
# Vectorizing code refers to operations that are performed
# on multiple components of a vector from a single statement
# apply_along_axis applies a function across the dimension of an array
# in essence, it is an optimized for loop.
# see find supercell method
rij_dot = self.find_supercell(crystal, R_max)
length = round(R_max/R_bin) # length of distance array
# create minimum distance vector and add dimension
d_min = np.arange(0.5, length+0.5, 1)
# create maximum distance vector and add dimension
d_max = np.arange(1.5, length+1.5, 1)
# stack the min and max distance vectors into array
d = np.vstack((d_min, d_max))*R_bin
def compute_rij_dist(atom):
"""
Computes the distance between atoms in the unit cell and atoms in
the supercell
Args:
atom = Fractional coordinates of the atom within the unit cell
Returns: the euclidean distance between atoms in the unit cell and
atoms in the supercell
"""
# dot product of atomic fractional coordinates and lattice matrix
origin = np.dot(atom, crystal.lattice.matrix)
origin = origin[np.newaxis, :] # add dimension to array
return cdist(rij_dot, origin)
# loop over fractional coordinates of each atom in the crystal to
# compute an array of euclidean distances
rij_dist = np.apply_along_axis(compute_rij_dist, axis=1,
arr=crystal.frac_coords)
def compute_R(span):
"""
Counts the euclidean distances within a bin range
Args:
span = An ordered pair of min and max distances for the bin
Returns: a count of distances within the bin range
"""
return ((span[0] <= rij_dist) & (rij_dist < span[1])).sum()
# R: the array which contains the number of occurrences of atomic pairs
# in each [dmin, dmax].
R = np.apply_along_axis(compute_R, axis=0, arr=d)
# radii in angstrom
r = np.arange(1, length+1, 1)*R_bin
# now calculate RDF based on the equation *** (reference from the book)
rho = len(crystal.frac_coords)/crystal.volume
R = R/(4*np.pi*R_bin*rho**2*crystal.volume) * np.power(r, -2)
self.RDF = np.vstack((r, R))
return |
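The normalization applied in the last two lines corresponds to the standard pair distribution function. With $n(r)$ the number of pairs counted in the shell $[r, r + \Delta r)$, $N$ atoms in the cell of volume $V$, and $\rho = N/V$, the code computes

$$
g(r) \;=\; \frac{n(r)}{4\pi r^{2}\,\Delta r\,\rho^{2}\,V}
      \;=\; \frac{n(r)}{4\pi r^{2}\,\Delta r\,\rho\,N}.
$$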
Python | def compute_rij_dist(atom):
"""
Computes the distance between atoms in the unit cell and atoms in
the supercell
Args:
atom = Fractional coordinates of the atom within the unit cell
Returns: the euclidean distance between atoms in the unit cell and
atoms in the supercell
"""
# dot product of atomic fractional coordinates and lattice matrix
origin = np.dot(atom, crystal.lattice.matrix)
origin = origin[np.newaxis, :] # add dimension to array
return cdist(rij_dot, origin) | def compute_rij_dist(atom):
"""
Computes the distance between atoms in the unit cell and atoms in
the supercell
Args:
atom = Fractional coordinates of the atom within the unit cell
Returns: the euclidean distance between atoms in the unit cell and
atoms in the supercell
"""
# dot product of atomic fractional coordinates and lattice matrix
origin = np.dot(atom, crystal.lattice.matrix)
origin = origin[np.newaxis, :] # add dimension to array
return cdist(rij_dot, origin) |
Python | def compute_R(span):
"""
Counts the euclidean distances within a bin range
Args:
span = An ordered pair of min and max distances for the bin
Returns: a count of distances within the bin range
"""
return ((span[0] <= rij_dist) & (rij_dist < span[1])).sum() | def compute_R(span):
"""
Counts the euclidean distances within a bin range
Args:
span = An ordered pair of min and max distances for the bin
Returns: a count of distances within the bin range
"""
return ((span[0] <= rij_dist) & (rij_dist < span[1])).sum() |
Python | def load_data(self):
"""
Obtain the struc/prop data from source.
"""
start = time()
self.strucs, self.props = Collection(self.file,
self.prop,
self.N_sample).\
extract_struc_prop()
end = time()
self.time['load_data'] = end-start | def load_data(self):
"""
Obtain the struc/prop data from source.
"""
start = time()
self.strucs, self.props = Collection(self.file,
self.prop,
self.N_sample).\
extract_struc_prop()
end = time()
self.time['load_data'] = end-start |
Python | def convert_data_1D(self, parallel=False, progress=True):
"""
Convert the structures to descriptors in the format of 1D array.
"""
start = time()
if progress:
import tqdm
tqdm_func = tqdm.tqdm
strucs = tqdm_func(list(self.strucs), desc=self.__class__.__name__)
else:
strucs = list(self.strucs)
if parallel:
from multiprocessing import Pool, cpu_count
from functools import partial
# QZ: it is not a good idea to use too many CPUs due to
# communication. Usually, 2-8 should be sufficient.
if type(parallel)==bool:
ncpu = cpu_count()
else:
ncpu = int(parallel)
print('---Parallel mode is on, {} cores will be used'.format(ncpu))
with Pool(ncpu) as p:
func = partial(self.calc_feas)
feas = p.map(func, strucs)
p.close()
p.join()
else:
feas = []
for struc in strucs:
feas.append(self.calc_feas(struc))
end = time()
self.time['convert_data'] = end-start
self.features = feas | def convert_data_1D(self, parallel=False, progress=True):
"""
Convert the structures to descriptors in the format of 1D array.
"""
start = time()
if progress:
import tqdm
tqdm_func = tqdm.tqdm
strucs = tqdm_func(list(self.strucs), desc=self.__class__.__name__)
else:
strucs = list(self.strucs)
if parallel:
from multiprocessing import Pool, cpu_count
from functools import partial
# QZ: it is not a good idea to use too many CPUs due to
# communication. Usually, 2-8 should be sufficient.
if type(parallel)==bool:
ncpu = cpu_count()
else:
ncpu = int(parallel)
print('---Parallel mode is on, {} cores will be used'.format(ncpu))
with Pool(ncpu) as p:
func = partial(self.calc_feas)
feas = p.map(func, strucs)
p.close()
p.join()
else:
feas = []
for struc in strucs:
feas.append(self.calc_feas(struc))
end = time()
self.time['convert_data'] = end-start
self.features = feas |
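Below is a minimal standalone version of the serial/parallel dispatch used above, with made-up names, showing how an integer caps the pool size while True falls back to all cores:

```python
from multiprocessing import Pool, cpu_count

def convert(items, calc, parallel=False):
    if parallel:
        ncpu = cpu_count() if parallel is True else int(parallel)
        with Pool(ncpu) as pool:
            return pool.map(calc, items)
    return [calc(item) for item in items]

if __name__ == '__main__':
    print(convert(range(5), abs, parallel=2))   # [0, 1, 2, 3, 4]
```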
Python | def calc_feas(self, struc, print_error=True):
"""
Calculate user-defined features to a set of descriptors.
Returns:
an object of calculated descriptors or an empty array.
"""
try:
feas = descriptor(struc, self.feature0)
except:
feas = []
if print_error:
print('Problems in :', struc.formula)
return feas | def calc_feas(self, struc, print_error=True):
"""
Calculate user-defined features to a set of descriptors.
Returns:
an object of calculated descriptors or an empty array.
"""
try:
feas = descriptor(struc, self.feature0)
except:
feas = []
if print_error:
print('Problems in :', struc.formula)
return feas |
Python | def ml_train(self, library='sklearn',
algo='KRR',
level='medium',
pipeline=False,
hidden_layers=None,
conv_layers=None,
plot=False,
print_info=True,
save=False):
"""
Build machine learning model for X/Y set.
Args:
library: sklearn, tensorflow, or pytorch
algo: the training algorithm.
level: The tightness level of training (light, medium, or tight)
pipeline: include preprocessing algorithm for pipelining
in sequence.
hidden_layers: hidden layers for neural network or
deep learning only.
plot: include plotting. (default = False)
print_info: print training and results information.
(default = True)
save: save the training simulation for future usage.
(default = False)
"""
print('\nML learning with {} algorithm'.format(algo))
tag = {'prop': self.prop, 'feature':self.feature}
start = time()
if library in ['SkLearn', 'sklearn']:
ml = method(feature=self.X, prop=self.Y, algo=algo, tag=tag,
pipeline=pipeline, params=level)
elif library in ['PyTorch', 'pytorch']:
ml = dl_torch(feature=self.X, prop=self.Y, algo=algo, tag=tag,
hidden_layers=hidden_layers,
conv_layers=conv_layers)
else:
raise ValueError('Unsupported library: {}'.format(library))
end = time()
self.time['ml'] = end-start
if plot:
ml.plot_correlation(figname=self.file[:-4]+'_'+algo+'.png')
if print_info:
ml.print_summary()
if save:
from sklearn.externals import joblib
joblib.dump(ml.estimator, algo+'.joblib')
self.ml = ml | def ml_train(self, library='sklearn',
algo='KRR',
level='medium',
pipeline=False,
hidden_layers=None,
conv_layers=None,
plot=False,
print_info=True,
save=False):
"""
Build machine learning model for X/Y set.
Args:
library: sklearn, tensorflow, or pytorch
algo: the training algorithm.
level: The tightness level of training (light, medium, or tight)
pipeline: include preprocessing algorithm for pipelining
in sequence.
hidden_layers: hidden layers for neural network or
deep learning only.
plot: include plotting. (default = False)
print_info: print training and results information.
(default = True)
save: save the training simulation for future usage.
(default = False)
"""
print('\nML learning with {} algorithm'.format(algo))
tag = {'prop': self.prop, 'feature':self.feature}
start = time()
if library in ['SkLearn', 'sklearn']:
ml = method(feature=self.X, prop=self.Y, algo=algo, tag=tag,
pipeline=pipeline, params=level)
elif library in ['PyTorch', 'pytorch']:
ml = dl_torch(feature=self.X, prop=self.Y, algo=algo, tag=tag,
hidden_layers=hidden_layers,
conv_layers=conv_layers)
else:
raise ValueError('Unsupported library: {}'.format(library))
end = time()
self.time['ml'] = end-start
if plot:
ml.plot_correlation(figname=self.file[:-4]+'_'+algo+'.png')
if print_info:
ml.print_summary()
if save:
from sklearn.externals import joblib
joblib.dump(ml.estimator, algo+'.joblib')
self.ml = ml |
Python | def apply_feature_scaling(self, X):
"""
Feature scaling with the user-defined algorithm.
Apply this function to uncorrelated arrays of feature.
Returns:
arrays of scaled feature.
"""
X = eval(self.feature_scaling+'()').fit_transform(X)
return X | def apply_feature_scaling(self, X):
"""
Feature scaling with the user-defined algorithm.
Apply this function to uncorrelated arrays of feature.
Returns:
arrays of scaled feature.
"""
X = eval(self.feature_scaling+'()').fit_transform(X)
return X |
Python | def print_outliers(self):
"""
print the outlier information
todo: make the output an option
"""
col_name = collections.OrderedDict(
{'Formula': [],
'Space group': [],
'Nsites': [],
'dY': [],
}
)
for id, diff in enumerate(self.ml.estimator.predict(self.X)-self.Y):
if abs(diff) > 3*self.ml.mae:
struc = self.strucs[id]
col_name['Formula'].append(struc.
composition.
get_reduced_formula_and_factor()[0])
col_name['Space group'].append(SpacegroupAnalyzer(struc).
get_space_group_symbol())
col_name['Nsites'].append(len(struc.species))
col_name['dY'].append(diff)
df = pd.DataFrame(col_name)
df = df.sort_values(['dY','Space group','Nsites'],
ascending=[True, True, True])
print('\nThe following structures have relatively high error')
print(tabulate(df, headers='keys', tablefmt='psql')) | def print_outliers(self):
"""
print the outlier information
todo: make the output an option
"""
col_name = collections.OrderedDict(
{'Formula': [],
'Space group': [],
'Nsites': [],
'dY': [],
}
)
for id, diff in enumerate(self.ml.estimator.predict(self.X)-self.Y):
if abs(diff) > 3*self.ml.mae:
struc = self.strucs[id]
col_name['Formula'].append(struc.
composition.
get_reduced_formula_and_factor()[0])
col_name['Space group'].append(SpacegroupAnalyzer(struc).
get_space_group_symbol())
col_name['Nsites'].append(len(struc.species))
col_name['dY'].append(diff)
df = pd.DataFrame(col_name)
df = df.sort_values(['dY','Space group','Nsites'],
ascending=[True, True, True])
print('\nThe following structures have relatively high error')
print(tabulate(df, headers='keys', tablefmt='psql')) |
Python | def read_dict(self):
"""
reading from **kwargs argument to determine the comprehensiveness
level of training and its parameters.
"""
for key, value in self.dict.items():
if value in self.parameters_level:
self.level = value
self.params = self.algos_params[self.algo]
break
else:
self.level = None
self.params = value
break | def read_dict(self):
"""
reading from **kwargs argument to determine the comprehensiveness
level of training and its parameters.
"""
for key, value in self.dict.items():
if value in self.parameters_level:
self.level = value
self.params = self.algos_params[self.algo]
break
else:
self.level = None
self.params = value
break |
Python | def plot_correlation(self, figname=None, figsize=(12,8)):
"""
plot the correlation between prediction and target values
"""
plt.figure(figsize=figsize)
plt.scatter(self.y_predicted, self.Y_test, c='green', label='test')
plt.scatter(self.y_predicted0, self.Y_train, c='blue', label='train')
plt.title('{0:d} materials, r$^2$ = {1:.4f}, Algo: {2:s}'.
format(len(self.prop), self.r2, self.algo))
plt.xlabel('Prediction')
plt.ylabel(self.tag['prop'])
plt.legend()
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() | def plot_correlation(self, figname=None, figsize=(12,8)):
"""
plot the correlation between prediction and target values
"""
plt.figure(figsize=figsize)
plt.scatter(self.y_predicted, self.Y_test, c='green', label='test')
plt.scatter(self.y_predicted0, self.Y_train, c='blue', label='train')
plt.title('{0:d} materials, r$^2$ = {1:.4f}, Algo: {2:s}'.
format(len(self.prop), self.r2, self.algo))
plt.xlabel('Prediction')
plt.ylabel(self.tag['prop'])
plt.legend()
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close() |
Python | def _get_data_from_pymatgen(ele):
'''
Get select elemental properties from pymatgen
args:
ele: a pymatgen element object
returns: a 1-d array containing select elemental properties
'''
'''
properties include:
atomic number, electronegativity, row, group, atomic mass,
atomic radius, van der waals radius, molar volume,
thermal conductivity, boiling point, melting point, and
solid density
'''
properties = [ele.Z, ele.X, ele.row, ele.group, ele.atomic_mass,
ele.atomic_radius, ele.van_der_waals_radius,
ele.molar_volume, ele.thermal_conductivity,
ele.boiling_point, ele.melting_point,
ele.density_of_solid]
if None in properties or np.isnan(properties).any():
for i, prop in enumerate(properties):
if prop is None or np.isnan(prop):
properties[i] = 0
# convert to numpy array and return properties
return np.array(properties) | def _get_data_from_pymatgen(ele):
'''
Get select elemental properties from pymatgen
args:
ele: a pymatgen element object
returns: a 1-d array containing select elemental properties
'''
'''
properties include:
atomic number, electronegativity, row, group, atomic mass,
atomic radius, van der waals radius, molar volume,
thermal conductivity, boiling point, melting point, and
solid density
'''
properties = [ele.Z, ele.X, ele.row, ele.group, ele.atomic_mass,
ele.atomic_radius, ele.van_der_waals_radius,
ele.molar_volume, ele.thermal_conductivity,
ele.boiling_point, ele.melting_point,
ele.density_of_solid]
if None in properties or np.isnan(properties).any():
for i, prop in enumerate(properties):
if prop is None or np.isnan(prop):
properties[i] = 0
# convert to numpy array and return properties
return np.array(properties) |
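A quick check of the resulting property vector for a single element, assuming pymatgen is installed and _get_data_from_pymatgen is importable:

```python
from pymatgen.core.periodic_table import Element

vec = _get_data_from_pymatgen(Element('Si'))
print(vec.shape)        # (12,): one entry per property listed in the docstring
print(vec[0], vec[3])   # 14.0 (atomic number Z) and 14.0 (group)
```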
Python | def _get_data_from_json(ele):
'''
Get select elemental properties from json file
args:
ele: a pymatgen element object
returns: a 1-d array containing select elemental properties
'''
# convert element object to string
elm = str(ele)
if elm in ['Pa', 'Ac', 'Pu', 'Np', 'Am', 'Bk', 'Cf', 'Cm', 'Es',
'Fm', 'Lr', 'Md', 'No']:
elm = 'Th'
elif elm in ['Eu', 'Pm']:
elm = 'La'
elif elm in ['Xe', 'Rn']:
elm = 'Kr'
elif elm in ['At']:
elm = 'I'
elif elm in ['Fr']:
elm = 'Cs'
elif elm in ['Ra']:
elm = 'Ba'
# call element data from dictionary
data = ele_data[elm]
# select property keys
props = ['first_ion_en', 'elec_aff', 'hfus', 'polzbl']
# initiate empty array for properties
properties = []
for prop in props:
# call property from dictionary keys
properties.append(data[prop])
if None in properties:
for i, prop in enumerate(properties):
if prop is None:
properties[i] = 0
# convert to array and return properties
    return np.array(properties)
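The ele_data dictionary is loaded elsewhere from a JSON file of elemental properties; a hypothetical sketch of the layout this function expects (key names taken from the code above, numeric values are placeholders only):

# hypothetical layout of ele_data; the numbers are placeholders for illustration
ele_data = {
    "Si": {
        "first_ion_en": 786.5,   # first ionization energy (placeholder)
        "elec_aff": 134.0,       # electron affinity (placeholder)
        "hfus": 50.2,            # heat of fusion (placeholder)
        "polzbl": 5.38,          # polarizability (placeholder)
    },
    # ... one entry per element symbol
}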
Python | def _qlm(site, neighbors, l, mvals):
'''
    Calculates the complex vector qlm associated with an atomic site
    and its set of nearest neighbors
Args:
site: a pymatgen crystal site
neighbors: a neighbor list corresponding to the site
l: free integer parameter
mvals: list of free integer parameters
Returns:
q: numpy array(complex128), the complex vector qlm normalized
by the number of nearest neighbors
'''
    # initialize the qlm accumulator as a complex vector of length 2l+1
q = np.zeros(2*l+1, dtype=np.complex128)
# iterate over mvals
for i, m in enumerate(mvals):
# take the neighbor count
neighbors_count = len(neighbors)
# iterate over neighbors
for neighbor in neighbors:
# find the position vector of the site/neighbor pair
r_vec = neighbor.coords - site.coords
r_mag = np.linalg.norm(r_vec)
# arccos(z/norm(r))
theta = np.arccos(r_vec[2] / r_mag)
if abs((r_vec[2] / r_mag) - 1.0) < 10.**(-8.):
theta = 0.0
elif abs((r_vec[2] / r_mag) + 1.0) < 10.**(-8.):
theta = np.pi
# phi
if r_vec[0] < 0.:
phi = np.pi + np.arctan(r_vec[1] / r_vec[0])
elif 0. < r_vec[0] and r_vec[1] < 0.:
phi = 2 * np.pi + np.arctan(r_vec[1] / r_vec[0])
elif 0. < r_vec[0] and 0. <= r_vec[1]:
phi = np.arctan(r_vec[1] / r_vec[0])
elif r_vec[0] == 0. and 0. < r_vec[1]:
phi = 0.5 * np.pi
elif r_vec[0] == 0. and r_vec[1] < 0.:
phi = 1.5 * np.pi
else:
phi = 0.
            # calculate the spherical harmonic associated with the
            # neighbor and add to q; assuming sph_harm is
            # scipy.special.sph_harm, its arguments are
            # (order m, degree l, azimuthal angle, polar angle), and mvals
            # are taken to be the signed orders m = -l..l, so the local
            # phi (azimuthal) and theta (polar) are passed in that order
            q[i] += sph_harm(m, l, phi, theta)
# normalize by number of neighbors
    return q / neighbors_count
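For reference, the quantity accumulated here is the neighbour-averaged spherical harmonic used in the Steinhardt bond order parameters:

q_{lm}(i) = \frac{1}{N_b(i)} \sum_{j=1}^{N_b(i)} Y_{lm}\big(\theta(\mathbf{r}_{ij}),\ \phi(\mathbf{r}_{ij})\big)

where N_b(i) is the number of nearest neighbours of site i and r_ij is the vector from site i to neighbour j.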
Python | def _scalar_product(q1, q2):
'''
Calculates the scalar product between two
complex vectors using the conjugate
Args:
q1: a complex vector
q2: a complex vector
Returns: float, The scalar product of two complex vectors
'''
    # multiply q1 by the conjugate of q2 and sum the terms; the result is
    # real up to round-off, so take the real part explicitly before
    # casting to float
    return float(np.sum(q1 * np.conjugate(q2)).real)
Python | def _ql(scalar_product, l):
'''
Calculates the steinhardt bond order parameter ql
given the scalar product of the complex vector of qlms with itself
and the parameter degree l
Args:
scalar_product: the scalar product of the complex vector qli
with itself
l: the degree of the bond order parameter
Returns:
ql: float, the steinhardt bond order parameter ql
'''
constant = (4 * np.pi) / (2*l + 1)
    return np.sqrt(constant * scalar_product)
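In the same notation, this function evaluates

q_{l}(i) = \sqrt{\frac{4\pi}{2l+1} \sum_{m=-l}^{l} \left| q_{lm}(i) \right|^{2}}

with the sum over |q_{lm}|^2 supplied by _scalar_product.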
Python | def _wli(qlms, l, m1, m2, m3):
'''Calculates the steinhardt bond order parameter wl
given the complex vector qlms, the parameter degree l
and the free integer parameters m1, m2, and m3
Args:
qlms: the complex vector of qlm values
corresponding to the parameter degree
l: degree of the steinhardt bond order parameter
m1, m2, m3: free integer parameters
Returns:
wli: float, the real part of the complex steinhardt bond order
parameter wl
'''
# calculate the wigner3j value for l and the free integer parameters (float)
w3j = wigner_3j(l, m1-l, l, m2-l, l, m3-l)
'''
    call the complex numbers with indices corresponding to the free
integer parameters m1, m2, m3 and multiply them together
'''
q1 = qlms[m1]
q2 = qlms[m2]
q3 = qlms[m3]
q = q1 * q2 * q3
# multiply the wigner3j value and the real part of the product of q1, q2, q3
wli = w3j * q.real
    return wli
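Each call evaluates a single term of the standard w_l sum,

w_{l}(i) = \sum_{m_1 + m_2 + m_3 = 0} \begin{pmatrix} l & l & l \\ m_1 & m_2 & m_3 \end{pmatrix} q_{lm_1}(i)\, q_{lm_2}(i)\, q_{lm_3}(i)

where m1, m2 and m3 in the code are array positions in 0..2l and the physical orders are recovered as m_k - l, matching the wigner_3j call above.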
Python | def apply_feature_scaling_array(self, X):
"""
Feature scaling with the user-defined algorithm.
        Apply this function to correlated arrays of features,
        e.g. the partial radial distribution function.
Returns:
arrays of scaled feature.
"""
X = eval(self.feature_scaling+'()').fit_transform(X)
        return X
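The eval call instantiates whichever scaler class name is stored in self.feature_scaling. A minimal sketch of the same dispatch (assuming the names refer to already-imported scikit-learn preprocessing classes such as 'MinMaxScaler'), using an explicit mapping instead of eval:

import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

# explicit name -> class mapping; avoids eval() on arbitrary strings
scalers = {"MinMaxScaler": MinMaxScaler, "StandardScaler": StandardScaler}

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
X_scaled = scalers["MinMaxScaler"]().fit_transform(X)  # same effect as eval("MinMaxScaler()")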
"""
Feature scaling with the user-defined algorithm.
Apply this function to correlated arrays of feature.
E.g. partial radial distribution function.
Returns:
arrays of scaled feature.
"""
X = eval(self.feature_scaling+'()').fit_transform(X)
return X |
Python | def all(self):
'''
Computes all Voronoi polyhedra features
Calls all voronoi features and stacks them
into a 1-d array
'''
# call voronoi feature methods
pef = self.get_packing_efficiency()
vstat = self.get_volume_statistics()
ecn = self.get_effective_coordination_number()
bstat = self.get_bond_statistics()
cop = self.get_chemical_ordering_parameters()
ea = self.get_environment_attributes()
# stack into 1-d array
arr = np.hstack((pef, vstat, ecn, bstat, cop, ea))
        return arr
Python | def _populate_element_dict(self):
        '''
        For features that depend on elements, populate a dictionary
        of empty lists with each element in the structure as a key
        Uses:
            self._elements: a list of pymatgen element objects
        Returns:
            a dictionary of empty lists where the keys are pymatgen elements
        '''
# create an empty dictionary
element_dict = {}
# iterate over element attribute to create
# a dictionary of empty lists with the constituent
# elements as keys
for element in self._elements:
element_dict[element] = []
        return element_dict
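The same dictionary can be built in one line, for reference:

# equivalent construction (inside the method) with a dict comprehension
element_dict = {element: [] for element in self._elements}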
Python | def _weighted_average(array, weights=None):
'''
Compute the weighted average of a 1-d array
Args:
array: a 1-d array or list
weights: weights corresponding to each element in the list
Returns:
the weighted average of the array
'''
    return np.average(array, weights=weights)
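For example:

# (1*1 + 2*1 + 4*2) / (1 + 1 + 2) = 11 / 4 = 2.75
np.average([1.0, 2.0, 4.0], weights=[1, 1, 2])   # -> 2.75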