Dataset schema (column statistics as reported by the dataset viewer):

body: string, 26 to 98.2k characters
body_hash: int64, spanning roughly the full signed 64-bit range (about -9.22e18 to 9.22e18)
docstring: string, 1 to 16.8k characters
path: string, 5 to 230 characters
name: string, 1 to 96 characters
repository_name: string, 7 to 89 characters
lang: string, 1 distinct value ("python")
body_without_docstring: string, 20 to 98.2k characters
@property def n(self): 'Returns the number of elements in the set covered by this cover.' return self._n
2,698,383,659,266,666,500
Returns the number of elements in the set covered by this cover.
igraph/clustering.py
n
tuandnvn/ecat_learning
python
@property def n(self): return self._n
def size(self, idx): 'Returns the size of a given cluster.\n\n @param idx: the cluster in which we are interested.\n ' return len(self[idx])
-2,611,264,052,909,075,500
Returns the size of a given cluster. @param idx: the cluster in which we are interested.
igraph/clustering.py
size
tuandnvn/ecat_learning
python
def size(self, idx): 'Returns the size of a given cluster.\n\n @param idx: the cluster in which we are interested.\n ' return len(self[idx])
def sizes(self, *args): 'Returns the size of given clusters.\n\n The indices are given as positional arguments. If there are no\n positional arguments, the function will return the sizes of all clusters.\n ' if args: return [len(self._clusters[idx]) for idx in args] return [len(cluster) for cluster in self]
-449,967,433,735,330,750
Returns the size of given clusters. The indices are given as positional arguments. If there are no positional arguments, the function will return the sizes of all clusters.
igraph/clustering.py
sizes
tuandnvn/ecat_learning
python
def sizes(self, *args): 'Returns the size of given clusters.\n\n The indices are given as positional arguments. If there are no\n positional arguments, the function will return the sizes of all clusters.\n ' if args: return [len(self._clusters[idx]) for idx in args] return [len(cluster) for cluster in self]
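As an editorial aside, a minimal usage sketch for the size helpers in the Cover records above, assuming python-igraph is installed and that Cover is importable from the top-level igraph package (the clusters and element count below are made up for illustration):

    from igraph import Cover

    # A cover of five elements by three possibly overlapping clusters.
    cover = Cover([[0, 1, 2], [2, 3], [4]], n=5)
    print(cover.n)            # 5
    print(cover.size(0))      # 3
    print(cover.sizes())      # [3, 2, 1]
    print(cover.sizes(1, 2))  # [2, 1]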
def size_histogram(self, bin_width=1): 'Returns the histogram of cluster sizes.\n\n @param bin_width: the bin width of the histogram\n @return: a L{Histogram} object\n ' return Histogram(bin_width, self.sizes())
-2,461,763,455,575,568,000
Returns the histogram of cluster sizes. @param bin_width: the bin width of the histogram @return: a L{Histogram} object
igraph/clustering.py
size_histogram
tuandnvn/ecat_learning
python
def size_histogram(self, bin_width=1): 'Returns the histogram of cluster sizes.\n\n @param bin_width: the bin width of the histogram\n @return: a L{Histogram} object\n ' return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None): 'Returns the summary of the cover.\n\n The summary includes the number of items and clusters, and also the\n list of members for each of the clusters if the verbosity is nonzero.\n\n @param verbosity: determines whether the cluster members should be\n printed. Zero verbosity prints the number of items and clusters only.\n @return: the summary of the cover as a string.\n ' out = StringIO() print(('Cover with %d clusters' % len(self)), file=out) if (verbosity < 1): return out.getvalue().strip() ndigits = len(str(len(self))) wrapper = _get_wrapper_for_width(width, subsequent_indent=(' ' * (ndigits + 3))) for (idx, cluster) in enumerate(self._formatted_cluster_iterator()): wrapper.initial_indent = ('[%*d] ' % (ndigits, idx)) print('\n'.join(wrapper.wrap(cluster)), file=out) return out.getvalue().strip()
-6,739,493,703,869,613,000
Returns the summary of the cover. The summary includes the number of items and clusters, and also the list of members for each of the clusters if the verbosity is nonzero. @param verbosity: determines whether the cluster members should be printed. Zero verbosity prints the number of items and clusters only. @return: the summary of the cover as a string.
igraph/clustering.py
summary
tuandnvn/ecat_learning
python
def summary(self, verbosity=0, width=None): 'Returns the summary of the cover.\n\n The summary includes the number of items and clusters, and also the\n list of members for each of the clusters if the verbosity is nonzero.\n\n @param verbosity: determines whether the cluster members should be\n printed. Zero verbosity prints the number of items and clusters only.\n @return: the summary of the cover as a string.\n ' out = StringIO() print(('Cover with %d clusters' % len(self)), file=out) if (verbosity < 1): return out.getvalue().strip() ndigits = len(str(len(self))) wrapper = _get_wrapper_for_width(width, subsequent_indent=(' ' * (ndigits + 3))) for (idx, cluster) in enumerate(self._formatted_cluster_iterator()): wrapper.initial_indent = ('[%*d] ' % (ndigits, idx)) print('\n'.join(wrapper.wrap(cluster)), file=out) return out.getvalue().strip()
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' for cluster in self: (yield ', '.join((str(member) for member in cluster)))
810,895,616,325,758,500
Iterates over the clusters and formats them into a string to be presented in the summary.
igraph/clustering.py
_formatted_cluster_iterator
tuandnvn/ecat_learning
python
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' for cluster in self: (yield ', '.join((str(member) for member in cluster)))
def __init__(self, graph, clusters=None): 'Creates a cover object for a given graph.\n\n @param graph: the graph that will be associated to the cover\n @param clusters: the list of clusters. If C{None}, it is assumed\n that there is only a single cluster that covers the whole graph.\n ' if (clusters is None): clusters = [range(graph.vcount())] Cover.__init__(self, clusters, n=graph.vcount()) if (self._n > graph.vcount()): raise ValueError('cluster list contains vertex ID larger than the number of vertices in the graph') self._graph = graph
3,603,294,436,514,720,300
Creates a cover object for a given graph. @param graph: the graph that will be associated to the cover @param clusters: the list of clusters. If C{None}, it is assumed that there is only a single cluster that covers the whole graph.
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, graph, clusters=None): 'Creates a cover object for a given graph.\n\n @param graph: the graph that will be associated to the cover\n @param clusters: the list of clusters. If C{None}, it is assumed\n that there is only a single cluster that covers the whole graph.\n ' if (clusters is None): clusters = [range(graph.vcount())] Cover.__init__(self, clusters, n=graph.vcount()) if (self._n > graph.vcount()): raise ValueError('cluster list contains vertex ID larger than the number of vertices in the graph') self._graph = graph
def crossing(self): 'Returns a boolean vector where element M{i} is C{True} iff edge\n M{i} lies between clusters, C{False} otherwise.' membership = [frozenset(cluster) for cluster in self.membership] return [membership[v1].isdisjoint(membership[v2]) for (v1, v2) in self.graph.get_edgelist()]
3,467,809,426,251,922,400
Returns a boolean vector where element M{i} is C{True} iff edge M{i} lies between clusters, C{False} otherwise.
igraph/clustering.py
crossing
tuandnvn/ecat_learning
python
def crossing(self): 'Returns a boolean vector where element M{i} is C{True} iff edge\n M{i} lies between clusters, C{False} otherwise.' membership = [frozenset(cluster) for cluster in self.membership] return [membership[v1].isdisjoint(membership[v2]) for (v1, v2) in self.graph.get_edgelist()]
@property def graph(self): 'Returns the graph belonging to this object' return self._graph
-6,013,293,917,706,169,000
Returns the graph belonging to this object
igraph/clustering.py
graph
tuandnvn/ecat_learning
python
@property def graph(self): return self._graph
def subgraph(self, idx): "Get the subgraph belonging to a given cluster.\n\n @param idx: the cluster index\n @return: a copy of the subgraph\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the cover was constructed.\n " return self._graph.subgraph(self[idx])
-3,335,315,451,606,916,600
Get the subgraph belonging to a given cluster. @param idx: the cluster index @return: a copy of the subgraph @precondition: the vertex set of the graph hasn't been modified since the moment the cover was constructed.
igraph/clustering.py
subgraph
tuandnvn/ecat_learning
python
def subgraph(self, idx): "Get the subgraph belonging to a given cluster.\n\n @param idx: the cluster index\n @return: a copy of the subgraph\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the cover was constructed.\n " return self._graph.subgraph(self[idx])
def subgraphs(self): "Gets all the subgraphs belonging to each of the clusters.\n\n @return: a list containing copies of the subgraphs\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the cover was constructed.\n " return [self._graph.subgraph(cl) for cl in self]
-6,612,431,575,192,130,000
Gets all the subgraphs belonging to each of the clusters. @return: a list containing copies of the subgraphs @precondition: the vertex set of the graph hasn't been modified since the moment the cover was constructed.
igraph/clustering.py
subgraphs
tuandnvn/ecat_learning
python
def subgraphs(self): "Gets all the subgraphs belonging to each of the clusters.\n\n @return: a list containing copies of the subgraphs\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the cover was constructed.\n " return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the cover to the given Cairo context in the given\n bounding box.\n\n This is done by calling L{Graph.__plot__()} with the same arguments, but\n drawing nice colored blobs around the vertex groups.\n\n This method understands all the positional and keyword arguments that\n are understood by L{Graph.__plot__()}, only the differences will be\n highlighted here:\n\n - C{mark_groups}: whether to highlight the vertex clusters by\n colored polygons. Besides the values accepted by L{Graph.__plot__}\n (i.e., a dict mapping colors to vertex indices, a list containing\n lists of vertex indices, or C{False}), the following are also\n accepted:\n\n - C{True}: all the clusters will be highlighted, the colors matching\n the corresponding color indices from the current palette\n (see the C{palette} keyword argument of L{Graph.__plot__}.\n\n - A dict mapping cluster indices or tuples of vertex indices to\n color names. The given clusters or vertex groups will be\n highlighted by the given colors.\n\n - A list of cluster indices. This is equivalent to passing a\n dict mapping numeric color indices from the current palette\n to cluster indices; therefore, the cluster referred to by element\n I{i} of the list will be highlighted by color I{i} from the\n palette.\n\n The value of the C{plotting.mark_groups} configuration key is also\n taken into account here; if that configuration key is C{True} and\n C{mark_groups} is not given explicitly, it will automatically be set\n to C{True}.\n\n In place of lists of vertex indices, you may also use L{VertexSeq}\n instances.\n\n In place of color names, you may also use color indices into the\n current palette. C{None} as a color name will mean that the\n corresponding group is ignored.\n\n - C{palette}: the palette used to resolve numeric color indices to RGBA\n values. By default, this is an instance of L{ClusterColoringPalette}.\n\n @see: L{Graph.__plot__()} for more supported keyword arguments.\n ' if (('edge_color' not in kwds) and ('color' not in self.graph.edge_attributes())): colors = ['grey20', 'grey80'] kwds['edge_color'] = [colors[is_crossing] for is_crossing in self.crossing()] if ('palette' in kwds): palette = kwds['palette'] else: palette = ClusterColoringPalette(len(self)) if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: kwds['mark_groups'] = enumerate(self) else: kwds['mark_groups'] = _handle_mark_groups_arg_for_clustering(kwds['mark_groups'], self) return self._graph.__plot__(context, bbox, palette, *args, **kwds)
-8,213,005,111,044,524,000
Plots the cover to the given Cairo context in the given bounding box. This is done by calling L{Graph.__plot__()} with the same arguments, but drawing nice colored blobs around the vertex groups. This method understands all the positional and keyword arguments that are understood by L{Graph.__plot__()}, only the differences will be highlighted here: - C{mark_groups}: whether to highlight the vertex clusters by colored polygons. Besides the values accepted by L{Graph.__plot__} (i.e., a dict mapping colors to vertex indices, a list containing lists of vertex indices, or C{False}), the following are also accepted: - C{True}: all the clusters will be highlighted, the colors matching the corresponding color indices from the current palette (see the C{palette} keyword argument of L{Graph.__plot__}. - A dict mapping cluster indices or tuples of vertex indices to color names. The given clusters or vertex groups will be highlighted by the given colors. - A list of cluster indices. This is equivalent to passing a dict mapping numeric color indices from the current palette to cluster indices; therefore, the cluster referred to by element I{i} of the list will be highlighted by color I{i} from the palette. The value of the C{plotting.mark_groups} configuration key is also taken into account here; if that configuration key is C{True} and C{mark_groups} is not given explicitly, it will automatically be set to C{True}. In place of lists of vertex indices, you may also use L{VertexSeq} instances. In place of color names, you may also use color indices into the current palette. C{None} as a color name will mean that the corresponding group is ignored. - C{palette}: the palette used to resolve numeric color indices to RGBA values. By default, this is an instance of L{ClusterColoringPalette}. @see: L{Graph.__plot__()} for more supported keyword arguments.
igraph/clustering.py
__plot__
tuandnvn/ecat_learning
python
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the cover to the given Cairo context in the given\n bounding box.\n\n This is done by calling L{Graph.__plot__()} with the same arguments, but\n drawing nice colored blobs around the vertex groups.\n\n This method understands all the positional and keyword arguments that\n are understood by L{Graph.__plot__()}, only the differences will be\n highlighted here:\n\n - C{mark_groups}: whether to highlight the vertex clusters by\n colored polygons. Besides the values accepted by L{Graph.__plot__}\n (i.e., a dict mapping colors to vertex indices, a list containing\n lists of vertex indices, or C{False}), the following are also\n accepted:\n\n - C{True}: all the clusters will be highlighted, the colors matching\n the corresponding color indices from the current palette\n (see the C{palette} keyword argument of L{Graph.__plot__}.\n\n - A dict mapping cluster indices or tuples of vertex indices to\n color names. The given clusters or vertex groups will be\n highlighted by the given colors.\n\n - A list of cluster indices. This is equivalent to passing a\n dict mapping numeric color indices from the current palette\n to cluster indices; therefore, the cluster referred to by element\n I{i} of the list will be highlighted by color I{i} from the\n palette.\n\n The value of the C{plotting.mark_groups} configuration key is also\n taken into account here; if that configuration key is C{True} and\n C{mark_groups} is not given explicitly, it will automatically be set\n to C{True}.\n\n In place of lists of vertex indices, you may also use L{VertexSeq}\n instances.\n\n In place of color names, you may also use color indices into the\n current palette. C{None} as a color name will mean that the\n corresponding group is ignored.\n\n - C{palette}: the palette used to resolve numeric color indices to RGBA\n values. By default, this is an instance of L{ClusterColoringPalette}.\n\n @see: L{Graph.__plot__()} for more supported keyword arguments.\n ' if (('edge_color' not in kwds) and ('color' not in self.graph.edge_attributes())): colors = ['grey20', 'grey80'] kwds['edge_color'] = [colors[is_crossing] for is_crossing in self.crossing()] if ('palette' in kwds): palette = kwds['palette'] else: palette = ClusterColoringPalette(len(self)) if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: kwds['mark_groups'] = enumerate(self) else: kwds['mark_groups'] = _handle_mark_groups_arg_for_clustering(kwds['mark_groups'], self) return self._graph.__plot__(context, bbox, palette, *args, **kwds)
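A hedged sketch of how the mark_groups behaviour documented above is typically exercised. It assumes python-igraph with a working Cairo plotting backend; the graph, the vertex groups, and the output filename are illustrative only:

    import igraph as ig

    g = ig.Graph.Famous("Zachary")
    # Two overlapping vertex groups; the remaining vertices stay ungrouped.
    cover = ig.VertexCover(g, [[0, 1, 2, 3, 7], [3, 4, 5, 6, 10]])
    # mark_groups=True highlights every cluster with colors from the palette;
    # __plot__ also draws edges that cross clusters in a lighter gray.
    ig.plot(cover, target="cover.png", mark_groups=True)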
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' if self._graph.is_named(): names = self._graph.vs['name'] for cluster in self: (yield ', '.join((str(names[member]) for member in cluster))) else: for cluster in self: (yield ', '.join((str(member) for member in cluster)))
6,838,424,363,819,696,000
Iterates over the clusters and formats them into a string to be presented in the summary.
igraph/clustering.py
_formatted_cluster_iterator
tuandnvn/ecat_learning
python
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' if self._graph.is_named(): names = self._graph.vs['name'] for cluster in self: (yield ', '.join((str(names[member]) for member in cluster))) else: for cluster in self: (yield ', '.join((str(member) for member in cluster)))
def __init__(self, graph, blocks=None, cohesion=None, parent=None): 'Constructs a new cohesive block structure for the given graph.\n\n If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the\n arguments will be ignored and L{Graph.cohesive_blocks()} will be\n called to calculate the cohesive blocks. Otherwise, these three\n variables should describe the *result* of a cohesive block structure\n calculation. Chances are that you never have to construct L{CohesiveBlocks}\n instances directly, just use L{Graph.cohesive_blocks()}.\n\n @param graph: the graph itself\n @param blocks: a list containing the blocks; each block is described\n as a list containing vertex IDs.\n @param cohesion: the cohesion of each block. The length of this list\n must be equal to the length of I{blocks}.\n @param parent: the parent block of each block. Negative values or\n C{None} mean that there is no parent block for that block. There\n should be only one parent block, which covers the entire graph.\n @see: Graph.cohesive_blocks()\n ' if ((blocks is None) or (cohesion is None) or (parent is None)): (blocks, cohesion, parent) = graph.cohesive_blocks() VertexCover.__init__(self, graph, blocks) self._cohesion = cohesion self._parent = parent for (idx, p) in enumerate(self._parent): if (p < 0): self._parent[idx] = None
-2,177,125,745,029,691,600
Constructs a new cohesive block structure for the given graph. If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the arguments will be ignored and L{Graph.cohesive_blocks()} will be called to calculate the cohesive blocks. Otherwise, these three variables should describe the *result* of a cohesive block structure calculation. Chances are that you never have to construct L{CohesiveBlocks} instances directly, just use L{Graph.cohesive_blocks()}. @param graph: the graph itself @param blocks: a list containing the blocks; each block is described as a list containing vertex IDs. @param cohesion: the cohesion of each block. The length of this list must be equal to the length of I{blocks}. @param parent: the parent block of each block. Negative values or C{None} mean that there is no parent block for that block. There should be only one parent block, which covers the entire graph. @see: Graph.cohesive_blocks()
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, graph, blocks=None, cohesion=None, parent=None): 'Constructs a new cohesive block structure for the given graph.\n\n If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the\n arguments will be ignored and L{Graph.cohesive_blocks()} will be\n called to calculate the cohesive blocks. Otherwise, these three\n variables should describe the *result* of a cohesive block structure\n calculation. Chances are that you never have to construct L{CohesiveBlocks}\n instances directly, just use L{Graph.cohesive_blocks()}.\n\n @param graph: the graph itself\n @param blocks: a list containing the blocks; each block is described\n as a list containing vertex IDs.\n @param cohesion: the cohesion of each block. The length of this list\n must be equal to the length of I{blocks}.\n @param parent: the parent block of each block. Negative values or\n C{None} mean that there is no parent block for that block. There\n should be only one parent block, which covers the entire graph.\n @see: Graph.cohesive_blocks()\n ' if ((blocks is None) or (cohesion is None) or (parent is None)): (blocks, cohesion, parent) = graph.cohesive_blocks() VertexCover.__init__(self, graph, blocks) self._cohesion = cohesion self._parent = parent for (idx, p) in enumerate(self._parent): if (p < 0): self._parent[idx] = None
def cohesion(self, idx): 'Returns the cohesion of the group with the given index.' return self._cohesion[idx]
-3,294,174,005,412,983,000
Returns the cohesion of the group with the given index.
igraph/clustering.py
cohesion
tuandnvn/ecat_learning
python
def cohesion(self, idx): return self._cohesion[idx]
def cohesions(self): 'Returns the list of cohesion values for each group.' return self._cohesion[:]
-9,078,904,226,652,061,000
Returns the list of cohesion values for each group.
igraph/clustering.py
cohesions
tuandnvn/ecat_learning
python
def cohesions(self): return self._cohesion[:]
def hierarchy(self): 'Returns a new graph that describes the hierarchical relationships\n between the groups.\n\n The new graph will be a directed tree; an edge will point from\n vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.\n In other words, the edges point downwards.\n ' from igraph import Graph edges = [pair for pair in izip(self._parent, xrange(len(self))) if (pair[0] is not None)] return Graph(edges, directed=True)
5,892,941,643,755,646,000
Returns a new graph that describes the hierarchical relationships between the groups. The new graph will be a directed tree; an edge will point from vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}. In other words, the edges point downwards.
igraph/clustering.py
hierarchy
tuandnvn/ecat_learning
python
def hierarchy(self): 'Returns a new graph that describes the hierarchical relationships\n between the groups.\n\n The new graph will be a directed tree; an edge will point from\n vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.\n In other words, the edges point downwards.\n ' from igraph import Graph edges = [pair for pair in izip(self._parent, xrange(len(self))) if (pair[0] is not None)] return Graph(edges, directed=True)
def max_cohesion(self, idx): 'Finds the maximum cohesion score among all the groups that contain\n the given vertex.' result = 0 for (cohesion, cluster) in izip(self._cohesion, self._clusters): if (idx in cluster): result = max(result, cohesion) return result
4,111,417,056,152,360,000
Finds the maximum cohesion score among all the groups that contain the given vertex.
igraph/clustering.py
max_cohesion
tuandnvn/ecat_learning
python
def max_cohesion(self, idx): 'Finds the maximum cohesion score among all the groups that contain\n the given vertex.' result = 0 for (cohesion, cluster) in izip(self._cohesion, self._clusters): if (idx in cluster): result = max(result, cohesion) return result
def max_cohesions(self): 'For each vertex in the graph, returns the maximum cohesion score\n among all the groups that contain the vertex.' result = ([0] * self._graph.vcount()) for (cohesion, cluster) in izip(self._cohesion, self._clusters): for idx in cluster: result[idx] = max(result[idx], cohesion) return result
-9,051,087,033,062,930,000
For each vertex in the graph, returns the maximum cohesion score among all the groups that contain the vertex.
igraph/clustering.py
max_cohesions
tuandnvn/ecat_learning
python
def max_cohesions(self): 'For each vertex in the graph, returns the maximum cohesion score\n among all the groups that contain the vertex.' result = ([0] * self._graph.vcount()) for (cohesion, cluster) in izip(self._cohesion, self._clusters): for idx in cluster: result[idx] = max(result[idx], cohesion) return result
def parent(self, idx): 'Returns the parent group index of the group with the given index\n or C{None} if the given group is the root.' return self._parent[idx]
-2,531,504,258,032,719,000
Returns the parent group index of the group with the given index or C{None} if the given group is the root.
igraph/clustering.py
parent
tuandnvn/ecat_learning
python
def parent(self, idx): 'Returns the parent group index of the group with the given index\n or C{None} if the given group is the root.' return self._parent[idx]
def parents(self): 'Returns the list of parent group indices for each group or C{None}\n if the given group is the root.' return self._parent[:]
4,112,353,736,681,686,500
Returns the list of parent group indices for each group or C{None} if the given group is the root.
igraph/clustering.py
parents
tuandnvn/ecat_learning
python
def parents(self): 'Returns the list of parent group indices for each group or C{None}\n if the given group is the root.' return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the cohesive block structure to the given Cairo context in\n the given bounding box.\n\n Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword\n arguments accepted by L{VertexCover.__plot__()} are also accepted here.\n The only difference is that the vertices are colored according to their\n maximal cohesions by default, and groups are marked by colored blobs\n except the last group which encapsulates the whole graph.\n\n See the documentation of L{VertexCover.__plot__()} for more details.\n ' prepare_groups = False if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: prepare_groups = True elif (kwds['mark_groups'] == True): prepare_groups = True if prepare_groups: colors = [pair for pair in enumerate(self.cohesions()) if (pair[1] > 1)] kwds['mark_groups'] = colors if ('vertex_color' not in kwds): kwds['vertex_color'] = self.max_cohesions() return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
3,229,657,450,994,385,400
Plots the cohesive block structure to the given Cairo context in the given bounding box. Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword arguments accepted by L{VertexCover.__plot__()} are also accepted here. The only difference is that the vertices are colored according to their maximal cohesions by default, and groups are marked by colored blobs except the last group which encapsulates the whole graph. See the documentation of L{VertexCover.__plot__()} for more details.
igraph/clustering.py
__plot__
tuandnvn/ecat_learning
python
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the cohesive block structure to the given Cairo context in\n the given bounding box.\n\n Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword\n arguments accepted by L{VertexCover.__plot__()} are also accepted here.\n The only difference is that the vertices are colored according to their\n maximal cohesions by default, and groups are marked by colored blobs\n except the last group which encapsulates the whole graph.\n\n See the documentation of L{VertexCover.__plot__()} for more details.\n ' prepare_groups = False if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: prepare_groups = True elif (kwds['mark_groups'] == True): prepare_groups = True if prepare_groups: colors = [pair for pair in enumerate(self.cohesions()) if (pair[1] > 1)] kwds['mark_groups'] = colors if ('vertex_color' not in kwds): kwds['vertex_color'] = self.max_cohesions() return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
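For context, a short sketch of how the CohesiveBlocks records above are normally obtained and queried; it assumes python-igraph, where Graph.cohesive_blocks() returns a CohesiveBlocks instance (the example graph is arbitrary):

    import igraph as ig

    g = ig.Graph.Famous("Zachary")
    blocks = g.cohesive_blocks()       # a CohesiveBlocks, which is also a VertexCover
    print(blocks.cohesions())          # cohesion value of each block
    print(blocks.parents())            # parent block index per block, None for the root
    print(blocks.max_cohesions()[:5])  # maximum cohesion seen by each of the first five vertices
    tree = blocks.hierarchy()          # directed tree of block containment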
def safeintdiv(x, y): 'Safe integer division that handles None gracefully' if (x is None): return None return int((x / y))
5,398,877,169,241,703,000
Safe integer division that handles None gracefully
igraph/clustering.py
safeintdiv
tuandnvn/ecat_learning
python
def safeintdiv(x, y): if (x is None): return None return int((x / y))
def safebisect(intervals, x): 'Safe list bisection that handles None gracefully' if (x is None): return None return bisect(intervals, x)
8,481,049,750,746,316,000
Safe list bisection that handles None gracefully
igraph/clustering.py
safebisect
tuandnvn/ecat_learning
python
def safebisect(intervals, x): if (x is None): return None return bisect(intervals, x)
@pytest.fixture def pressure_sensor() -> mmr920C04.PressureSensor: 'Fixture for pressure sensor driver.' return mmr920C04.PressureSensor()
876,683,139,357,676,000
Fixture for pressure sensor driver.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
pressure_sensor
Opentrons/protocol_framework
python
@pytest.fixture def pressure_sensor() -> mmr920C04.PressureSensor: return mmr920C04.PressureSensor()
@pytest.fixture def capacitive_sensor() -> fdc1004.CapacitiveSensor: 'Fixture for capacitive sensor driver.' return fdc1004.CapacitiveSensor()
-5,125,160,098,761,263,000
Fixture for capacitive sensor driver.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
capacitive_sensor
Opentrons/protocol_framework
python
@pytest.fixture def capacitive_sensor() -> fdc1004.CapacitiveSensor: return fdc1004.CapacitiveSensor()
@pytest.fixture def temperature_sensor() -> hdc2080.EnvironmentSensor: 'Fixture for temperature sensor driver.' return hdc2080.EnvironmentSensor(SensorType.temperature)
-6,278,661,759,028,029,000
Fixture for temperature sensor driver.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
temperature_sensor
Opentrons/protocol_framework
python
@pytest.fixture def temperature_sensor() -> hdc2080.EnvironmentSensor: return hdc2080.EnvironmentSensor(SensorType.temperature)
@pytest.fixture def humidity_sensor() -> hdc2080.EnvironmentSensor: 'Fixture for humidity sensor driver.' return hdc2080.EnvironmentSensor(SensorType.humidity)
-2,967,201,093,913,927,000
Fixture for humidity sensor driver.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
humidity_sensor
Opentrons/protocol_framework
python
@pytest.fixture def humidity_sensor() -> hdc2080.EnvironmentSensor: return hdc2080.EnvironmentSensor(SensorType.humidity)
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, BaselineSensorRequest(payload=BaselineSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), sample_rate=UInt16Field(10)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, BaselineSensorRequest(payload=BaselineSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), sample_rate=UInt16Field(10)))]]) async def test_polling(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: 'Test that a polling function sends the expected message.' messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.get_baseline(messenger, node, 10, 10)) messenger.send.assert_called_once_with(node_id=node, message=message)
-4,447,089,151,653,519,400
Test that a polling function sends the expected message.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_polling
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, BaselineSensorRequest(payload=BaselineSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), sample_rate=UInt16Field(10)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, BaselineSensorRequest(payload=BaselineSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), sample_rate=UInt16Field(10)))]]) async def test_polling(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.get_baseline(messenger, node, 10, 10)) messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')]]) async def test_receive_data_polling(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: 'Test that data is received from the polling function.' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder return_data = (await sensor.get_baseline(mock_messenger, NodeId.pipette_left, 10, 10)) assert (return_data == SensorDataType.build([0, 1, 0]))
2,886,839,213,322,297,000
Test that data is received from the polling function.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_receive_data_polling
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')]]) async def test_receive_data_polling(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder return_data = (await sensor.get_baseline(mock_messenger, NodeId.pipette_left, 10, 10)) assert (return_data == SensorDataType.build([0, 1, 0]))
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.temperature), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('humidity_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.humidity), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))]]) async def test_write(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: 'Check that writing sensor data is successful.' data = SensorDataType.build([2, 2, 0, 0]) messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.write(messenger, NodeId.pipette_left, data)) messenger.send.assert_called_once_with(node_id=node, message=message)
9,097,184,948,181,648,000
Check that writing sensor data is successful.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_write
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.temperature), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))], [lazy_fixture('humidity_sensor'), NodeId.pipette_left, WriteToSensorRequest(payload=WriteToSensorRequestPayload(sensor=SensorTypeField(SensorType.humidity), sensor_id=SensorIdField(SensorId.S0), data=UInt32Field(SensorDataType.build([2, 2, 0, 0]).to_int), reg_address=UInt8Field(0)))]]) async def test_write(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: data = SensorDataType.build([2, 2, 0, 0]) messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.write(messenger, NodeId.pipette_left, data)) messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.temperature), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('humidity_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.humidity), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))]]) async def test_read(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: 'Test that a read function sends the expected message.' messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.read(messenger, node, False)) messenger.send.assert_called_once_with(node_id=node, message=message)
-3,859,175,399,935,780,000
Test that a read function sends the expected message.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_read
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node', 'message'], argvalues=[[lazy_fixture('pressure_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.pressure), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('capacitive_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.capacitive), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.temperature), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))], [lazy_fixture('humidity_sensor'), NodeId.pipette_left, ReadFromSensorRequest(payload=ReadFromSensorRequestPayload(sensor=SensorTypeField(SensorType.humidity), sensor_id=SensorIdField(SensorId.S0), offset_reading=UInt8Field(False)))]]) async def test_read(sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition) -> None: messenger = mock.AsyncMock(spec=CanMessenger) (await sensor.read(messenger, node, False)) messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')], [lazy_fixture('temperature_sensor')], [lazy_fixture('humidity_sensor')]]) async def test_receive_data_read(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: 'Test that data is received from the read function.' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder return_data = (await sensor.read(mock_messenger, NodeId.pipette_left, False, 10)) assert (return_data == SensorDataType.build([0, 1, 0]))
-7,847,721,743,718,292,000
Test that data is received from the read function.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_receive_data_read
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')], [lazy_fixture('temperature_sensor')], [lazy_fixture('humidity_sensor')]]) async def test_receive_data_read(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder return_data = (await sensor.read(mock_messenger, NodeId.pipette_left, False, 10)) assert (return_data == SensorDataType.build([0, 1, 0]))
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')]]) async def test_threshold(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: 'Test that data is received from the threshold function.' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, SetSensorThresholdRequest): can_message_notifier.notify(SensorThresholdResponse(payload=SensorThresholdResponsePayload(threshold=message.payload.threshold, sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), mode=message.payload.mode)), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) threshold = SensorDataType.build([0, 5]) mock_messenger.send.side_effect = responder return_data = (await sensor.send_zero_threshold(mock_messenger, NodeId.pipette_left, threshold, 10)) assert (return_data == threshold)
-666,913,451,662,296,800
Test that data is received from the threshold function.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_threshold
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor'], argvalues=[[lazy_fixture('pressure_sensor')], [lazy_fixture('capacitive_sensor')]]) async def test_threshold(sensor: sensor_abc.AbstractAdvancedSensor, mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier) -> None: def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, SetSensorThresholdRequest): can_message_notifier.notify(SensorThresholdResponse(payload=SensorThresholdResponsePayload(threshold=message.payload.threshold, sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), mode=message.payload.mode)), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id))) threshold = SensorDataType.build([0, 5]) mock_messenger.send.side_effect = responder return_data = (await sensor.send_zero_threshold(mock_messenger, NodeId.pipette_left, threshold, 10)) assert (return_data == threshold)
@pytest.mark.parametrize(argnames=['node_id', 'timeout', 'sensor'], argvalues=[[NodeId.pipette_left, 1, lazy_fixture('pressure_sensor')], [NodeId.pipette_left, 5, lazy_fixture('capacitive_sensor')]]) async def test_bind_to_sync(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for bind_to_sync.\n\n Tests that bind_to_sync does in fact\n send out a BindSensorOutputRequest.\n ' async with sensor.bind_output(mock_messenger, node_id, SensorOutputBinding.sync): mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.sync)))) mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.none))))
-859,439,439,960,516,100
Test for bind_to_sync. Tests that bind_to_sync does in fact send out a BindSensorOutputRequest.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_bind_to_sync
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['node_id', 'timeout', 'sensor'], argvalues=[[NodeId.pipette_left, 1, lazy_fixture('pressure_sensor')], [NodeId.pipette_left, 5, lazy_fixture('capacitive_sensor')]]) async def test_bind_to_sync(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for bind_to_sync.\n\n Tests that bind_to_sync does in fact\n send out a BindSensorOutputRequest.\n ' async with sensor.bind_output(mock_messenger, node_id, SensorOutputBinding.sync): mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.sync)))) mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.none))))
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_right, 10], [lazy_fixture('pressure_sensor'), NodeId.pipette_left, 2]]) async def test_get_baseline(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for get_baseline.\n\n Tests that a BaselineSensorRequest gets sent,\n and reads ReadFromSensorResponse message containing the\n correct information.\n ' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, BaselineSensorRequest): can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), sensor_data=Int32Field(50))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder baseline = (await sensor.get_baseline(mock_messenger, node_id, 100, timeout)) assert (baseline == SensorDataType.build(Int32Field(50)))
-1,316,188,757,608,001,800
Test for get_baseline. Tests that a BaselineSensorRequest gets sent, and reads ReadFromSensorResponse message containing the correct information.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_get_baseline
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_right, 10], [lazy_fixture('pressure_sensor'), NodeId.pipette_left, 2]]) async def test_get_baseline(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for get_baseline.\n\n Tests that a BaselineSensorRequest gets sent,\n and reads ReadFromSensorResponse message containing the\n correct information.\n ' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, BaselineSensorRequest): can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), sensor_data=Int32Field(50))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder baseline = (await sensor.get_baseline(mock_messenger, node_id, 100, timeout)) assert (baseline == SensorDataType.build(Int32Field(50)))
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_left, 2], [lazy_fixture('pressure_sensor'), NodeId.pipette_right, 3]]) async def test_debug_poll(mock_messenger: mock.AsyncMock, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for debug poll.' async with sensor.bind_output(mock_messenger, node_id, SensorOutputBinding.report): for i in range(2): with patch.object(sensor._scheduler, '_wait_for_response', new=AsyncMock(return_value=SensorDataType.build(50))): data = (await sensor.get_report(node_id, mock_messenger, timeout)) assert (data == SensorDataType.build(Int32Field(50))) mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.none))))
-8,902,417,442,430,636,000
Test for debug poll.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_debug_poll
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_left, 2], [lazy_fixture('pressure_sensor'), NodeId.pipette_right, 3]]) async def test_debug_poll(mock_messenger: mock.AsyncMock, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: async with sensor.bind_output(mock_messenger, node_id, SensorOutputBinding.report): for i in range(2): with patch.object(sensor._scheduler, '_wait_for_response', new=AsyncMock(return_value=SensorDataType.build(50))): data = (await sensor.get_report(node_id, mock_messenger, timeout)) assert (data == SensorDataType.build(Int32Field(50))) mock_messenger.send.assert_called_with(node_id=node_id, message=BindSensorOutputRequest(payload=BindSensorOutputRequestPayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), binding=SensorOutputBindingField(SensorOutputBinding.none))))
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_left, 2], [lazy_fixture('pressure_sensor'), NodeId.pipette_right, 3], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, 2], [lazy_fixture('humidity_sensor'), NodeId.pipette_right, 2]]) async def test_peripheral_status(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: 'Test for getting peripheral device status.' def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, PeripheralStatusRequest): can_message_notifier.notify(PeripheralStatusResponse(payload=PeripheralStatusResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), status=UInt8Field(1))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder status = (await sensor.get_device_status(mock_messenger, node_id, timeout)) assert status
3,318,708,624,848,140,300
Test for getting peripheral device status.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
test_peripheral_status
Opentrons/protocol_framework
python
@pytest.mark.parametrize(argnames=['sensor', 'node_id', 'timeout'], argvalues=[[lazy_fixture('capacitive_sensor'), NodeId.pipette_left, 2], [lazy_fixture('pressure_sensor'), NodeId.pipette_right, 3], [lazy_fixture('temperature_sensor'), NodeId.pipette_left, 2], [lazy_fixture('humidity_sensor'), NodeId.pipette_right, 2]]) async def test_peripheral_status(mock_messenger: mock.AsyncMock, can_message_notifier: MockCanMessageNotifier, sensor: sensor_abc.AbstractAdvancedSensor, node_id: NodeId, timeout: int) -> None: def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, PeripheralStatusRequest): can_message_notifier.notify(PeripheralStatusResponse(payload=PeripheralStatusResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), status=UInt8Field(1))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id))) mock_messenger.send.side_effect = responder status = (await sensor.get_device_status(mock_messenger, node_id, timeout)) assert status
def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id)))
-330,064,312,598,272,060
Message responder.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
responder
Opentrons/protocol_framework
python
def responder(node_id: NodeId, message: MessageDefinition) -> None: can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor_data=Int32Field(256), sensor_id=SensorIdField(SensorId.S0), sensor=SensorTypeField(sensor._sensor_type))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id)))
def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, SetSensorThresholdRequest): can_message_notifier.notify(SensorThresholdResponse(payload=SensorThresholdResponsePayload(threshold=message.payload.threshold, sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), mode=message.payload.mode)), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id)))
7,150,935,569,391,111,000
Message responder.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
responder
Opentrons/protocol_framework
python
def responder(node_id: NodeId, message: MessageDefinition) -> None: if isinstance(message, SetSensorThresholdRequest): can_message_notifier.notify(SensorThresholdResponse(payload=SensorThresholdResponsePayload(threshold=message.payload.threshold, sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), mode=message.payload.mode)), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=NodeId.host, function_code=0, originating_node_id=node_id)))
def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, BaselineSensorRequest): can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), sensor_data=Int32Field(50))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id)))
-778,312,438,194,596,100
Message responder.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
responder
Opentrons/protocol_framework
python
def responder(node_id: NodeId, message: MessageDefinition) -> None: if isinstance(message, BaselineSensorRequest): can_message_notifier.notify(ReadFromSensorResponse(payload=ReadFromSensorResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), sensor_data=Int32Field(50))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id)))
def responder(node_id: NodeId, message: MessageDefinition) -> None: 'Message responder.' if isinstance(message, PeripheralStatusRequest): can_message_notifier.notify(PeripheralStatusResponse(payload=PeripheralStatusResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), status=UInt8Field(1))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id)))
-2,441,369,327,342,985,000
Message responder.
hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py
responder
Opentrons/protocol_framework
python
def responder(node_id: NodeId, message: MessageDefinition) -> None: if isinstance(message, PeripheralStatusRequest): can_message_notifier.notify(PeripheralStatusResponse(payload=PeripheralStatusResponsePayload(sensor=SensorTypeField(sensor._sensor_type), sensor_id=SensorIdField(SensorId.S0), status=UInt8Field(1))), ArbitrationId(parts=ArbitrationIdParts(message_id=ReadFromSensorResponse.message_id, node_id=node_id, function_code=0, originating_node_id=node_id)))
def has_mongo(): 'Determine if MongoDB is up and usable' if os.environ.get('MP_FAKEMONGO'): mongo = False else: try: pymongo.MongoClient() mongo = True except: mongo = False return mongo
2,335,455,765,548,238,300
Determine if MongoDB is up and usable
pymatgen/db/tests/common.py
has_mongo
chc273/pymatgen-db
python
def has_mongo(): if os.environ.get('MP_FAKEMONGO'): mongo = False else: try: pymongo.MongoClient() mongo = True except: mongo = False return mongo
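has_mongo() is the sort of probe a test suite uses to skip database-dependent tests when no server is reachable. One plausible way to wire it up (the test class and test name below are illustrative, not part of the original file):

    import unittest
    from pymatgen.db.tests.common import has_mongo   # module path taken from the record above

    @unittest.skipUnless(has_mongo(), "MongoDB is not running (or MP_FAKEMONGO is set)")
    class QueryRoundTripTest(unittest.TestCase):
        def test_roundtrip(self):
            ...   # a test body that needs a live MongoDB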
def connect(self, clear=False): 'Connect to Mongo DB\n\n :return: pymongo Database\n ' c = pymongo.MongoClient() db = c[self.DB] if clear: for coll in (self.SRC, self.DST): db[coll].remove() tcoll = ((coll + '.') + CollectionTracker.TRACKING_NAME) db[tcoll].remove() return db
8,094,381,745,248,001,000
Connect to Mongo DB :return: pymongo Database
pymatgen/db/tests/common.py
connect
chc273/pymatgen-db
python
def connect(self, clear=False): 'Connect to Mongo DB\n\n :return: pymongo Database\n ' c = pymongo.MongoClient() db = c[self.DB] if clear: for coll in (self.SRC, self.DST): db[coll].remove() tcoll = ((coll + '.') + CollectionTracker.TRACKING_NAME) db[tcoll].remove() return db
def run_command(self, args, options): 'Run the command-line given by the list\n in `args`, adding the dictionary given by\n options as long-form --{key}=value pairs.\n ' for (key, value) in options: args.append('--{}'.format(key)) if value: args.append(value) return subprocess.call(args)
6,931,213,289,327,645,000
Run the command-line given by the list in `args`, adding the dictionary given by options as long-form --{key}=value pairs.
pymatgen/db/tests/common.py
run_command
chc273/pymatgen-db
python
def run_command(self, args, options): 'Run the command-line given by the list\n in `args`, adding the dictionary given by\n options as long-form --{key}=value pairs.\n ' for (key, value) in options: args.append('--{}'.format(key)) if value: args.append(value) return subprocess.call(args)
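Note that run_command consumes options as an iterable of (key, value) pairs, and a falsy value produces a bare flag; the flags end up as separate argv entries rather than literal --key=value strings. A small standalone illustration of that mapping (the program and option names are made up):

    # Reproduces the option expansion done by run_command; 'mgdb' is a placeholder program name.
    args = ["mgdb", "build"]
    for key, value in [("config", "db.json"), ("quiet", None)]:
        args.append("--{}".format(key))
        if value:
            args.append(value)
    print(args)   # ['mgdb', 'build', '--config', 'db.json', '--quiet']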
def myfunc(self, x, slope, intercept): '\n input, slope, intercept\n ' return ((slope * x) + intercept)
-5,121,193,954,077,837,000
input, slope, intercept
helloword/ml.py
myfunc
badpaybad/mypython
python
def myfunc(self, x, slope, intercept): '\n \n ' return ((slope * x) + intercept)
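Since the model above is just a straight line, a worked value with made-up numbers may help: with slope 2 and intercept 1, self.myfunc(3.0, 2.0, 1.0) evaluates to 2*3 + 1 = 7.

    y = 2.0 * 3.0 + 1.0   # 7.0, the value myfunc returns for x=3.0, slope=2.0, intercept=1.0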
async def begin_create_or_update(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, gallery_image: '_models.GalleryImage', **kwargs) -> AsyncLROPoller['_models.GalleryImage']: "Create or update a gallery Image Definition.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to\n be created.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be created or updated.\n The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the\n middle. The maximum length is 80 characters.\n :type gallery_image_name: str\n :param gallery_image: Parameters supplied to the create or update gallery image operation.\n :type gallery_image: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either GalleryImage or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_03_01.models.GalleryImage]\n :raises ~azure.core.exceptions.HttpResponseError:\n " polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, gallery_name=gallery_name, gallery_image_name=gallery_image_name, gallery_image=gallery_image, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('GalleryImage', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-4,321,324,683,486,622,700
Create or update a gallery Image Definition. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to be created. :type gallery_name: str :param gallery_image_name: The name of the gallery Image Definition to be created or updated. The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 characters. :type gallery_image_name: str :param gallery_image: Parameters supplied to the create or update gallery image operation. :type gallery_image: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either GalleryImage or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_03_01.models.GalleryImage] :raises ~azure.core.exceptions.HttpResponseError:
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_gallery_images_operations.py
begin_create_or_update
Darkstar1t/azure-sdk-for-python
python
async def begin_create_or_update(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, gallery_image: '_models.GalleryImage', **kwargs) -> AsyncLROPoller['_models.GalleryImage']: "Create or update a gallery Image Definition.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to\n be created.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be created or updated.\n The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the\n middle. The maximum length is 80 characters.\n :type gallery_image_name: str\n :param gallery_image: Parameters supplied to the create or update gallery image operation.\n :type gallery_image: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either GalleryImage or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_03_01.models.GalleryImage]\n :raises ~azure.core.exceptions.HttpResponseError:\n " polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, gallery_name=gallery_name, gallery_image_name=gallery_image_name, gallery_image=gallery_image, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('GalleryImage', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
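The method above returns an AsyncLROPoller; a typical async usage sketch follows (client construction, credential, subscription id, and all resource names are illustrative placeholders, not values from this SDK file):

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.compute.v2019_03_01.aio import ComputeManagementClient

    async def create_image_definition(gallery_image):
        # gallery_image is a GalleryImage model instance prepared by the caller.
        async with ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
            poller = await client.gallery_images.begin_create_or_update(
                resource_group_name="my-rg",
                gallery_name="myGallery",
                gallery_image_name="myImageDefinition",
                gallery_image=gallery_image,
            )
            return await poller.result()   # wait for the long-running operation to finish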
async def get(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, **kwargs) -> '_models.GalleryImage': 'Retrieves information about a gallery Image Definition.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are\n to be retrieved.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be retrieved.\n :type gallery_image_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: GalleryImage, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-03-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('GalleryImage', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
5,591,217,699,664,159,000
Retrieves information about a gallery Image Definition. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. :type gallery_name: str :param gallery_image_name: The name of the gallery Image Definition to be retrieved. :type gallery_image_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: GalleryImage, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage :raises: ~azure.core.exceptions.HttpResponseError
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_gallery_images_operations.py
get
Darkstar1t/azure-sdk-for-python
python
async def get(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, **kwargs) -> '_models.GalleryImage': 'Retrieves information about a gallery Image Definition.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are\n to be retrieved.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be retrieved.\n :type gallery_image_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: GalleryImage, or the result of cls(response)\n :rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-03-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('GalleryImage', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
async def begin_delete(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, **kwargs) -> AsyncLROPoller[None]: "Delete a gallery image.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to\n be deleted.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be deleted.\n :type gallery_image_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n " polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, gallery_name=gallery_name, gallery_image_name=gallery_image_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
7,484,213,028,850,654,000
Delete a gallery image. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to be deleted. :type gallery_name: str :param gallery_image_name: The name of the gallery Image Definition to be deleted. :type gallery_image_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError:
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_gallery_images_operations.py
begin_delete
Darkstar1t/azure-sdk-for-python
python
async def begin_delete(self, resource_group_name: str, gallery_name: str, gallery_image_name: str, **kwargs) -> AsyncLROPoller[None]: "Delete a gallery image.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to\n be deleted.\n :type gallery_name: str\n :param gallery_image_name: The name of the gallery Image Definition to be deleted.\n :type gallery_image_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n " polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._delete_initial(resource_group_name=resource_group_name, gallery_name=gallery_name, gallery_image_name=gallery_image_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str'), 'galleryImageName': self._serialize.url('gallery_image_name', gallery_image_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
def list_by_gallery(self, resource_group_name: str, gallery_name: str, **kwargs) -> AsyncIterable['_models.GalleryImageList']: 'List gallery Image Definitions in a gallery.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to\n be listed.\n :type gallery_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either GalleryImageList or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryImageList]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-03-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list_by_gallery.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('GalleryImageList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
-6,598,339,211,277,231,000
List gallery Image Definitions in a gallery. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to be listed. :type gallery_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either GalleryImageList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryImageList] :raises: ~azure.core.exceptions.HttpResponseError
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_gallery_images_operations.py
list_by_gallery
Darkstar1t/azure-sdk-for-python
python
def list_by_gallery(self, resource_group_name: str, gallery_name: str, **kwargs) -> AsyncIterable['_models.GalleryImageList']: 'List gallery Image Definitions in a gallery.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to\n be listed.\n :type gallery_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either GalleryImageList or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryImageList]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-03-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list_by_gallery.metadata['url'] path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'galleryName': self._serialize.url('gallery_name', gallery_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('GalleryImageList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
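The paged result is consumed with async iteration; a short sketch, assuming client is an async ComputeManagementClient as in the earlier sketch and the resource names are placeholders:

    async def print_image_definitions(client):
        # list_by_gallery returns an AsyncItemPaged of GalleryImage models.
        async for image in client.gallery_images.list_by_gallery("my-rg", "myGallery"):
            print(image.name)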
def _InternalConstructMessage(full_name): 'Constructs a nested message.' from google.protobuf import symbol_database return symbol_database.Default().GetSymbol(full_name)()
-4,080,538,055,633,935,000
Constructs a nested message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
_InternalConstructMessage
JustinACoder/H22-GR3-UnrealAI
python
def _InternalConstructMessage(full_name): from google.protobuf import symbol_database return symbol_database.Default().GetSymbol(full_name)()
def __eq__(self, other_msg): 'Recursively compares two messages by value and structure.' raise NotImplementedError
3,488,240,450,900,297,000
Recursively compares two messages by value and structure.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
__eq__
JustinACoder/H22-GR3-UnrealAI
python
def __eq__(self, other_msg): raise NotImplementedError
def __str__(self): 'Outputs a human-readable representation of the message.' raise NotImplementedError
-275,227,890,260,641,820
Outputs a human-readable representation of the message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
__str__
JustinACoder/H22-GR3-UnrealAI
python
def __str__(self): raise NotImplementedError
def __unicode__(self): 'Outputs a human-readable representation of the message.' raise NotImplementedError
-7,489,656,935,393,816,000
Outputs a human-readable representation of the message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
__unicode__
JustinACoder/H22-GR3-UnrealAI
python
def __unicode__(self): raise NotImplementedError
def MergeFrom(self, other_msg): 'Merges the contents of the specified message into current message.\n\n This method merges the contents of the specified message into the current\n message. Singular fields that are set in the specified message overwrite\n the corresponding fields in the current message. Repeated fields are\n appended. Singular sub-messages and groups are recursively merged.\n\n Args:\n other_msg (Message): A message to merge into the current message.\n ' raise NotImplementedError
-5,259,389,814,106,578,000
Merges the contents of the specified message into current message. This method merges the contents of the specified message into the current message. Singular fields that are set in the specified message overwrite the corresponding fields in the current message. Repeated fields are appended. Singular sub-messages and groups are recursively merged. Args: other_msg (Message): A message to merge into the current message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
MergeFrom
JustinACoder/H22-GR3-UnrealAI
python
def MergeFrom(self, other_msg): 'Merges the contents of the specified message into current message.\n\n This method merges the contents of the specified message into the current\n message. Singular fields that are set in the specified message overwrite\n the corresponding fields in the current message. Repeated fields are\n appended. Singular sub-messages and groups are recursively merged.\n\n Args:\n other_msg (Message): A message to merge into the current message.\n ' raise NotImplementedError
def CopyFrom(self, other_msg): 'Copies the content of the specified message into the current message.\n\n The method clears the current message and then merges the specified\n message using MergeFrom.\n\n Args:\n other_msg (Message): A message to copy into the current one.\n ' if (self is other_msg): return self.Clear() self.MergeFrom(other_msg)
3,342,220,506,585,992,000
Copies the content of the specified message into the current message. The method clears the current message and then merges the specified message using MergeFrom. Args: other_msg (Message): A message to copy into the current one.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
CopyFrom
JustinACoder/H22-GR3-UnrealAI
python
def CopyFrom(self, other_msg): 'Copies the content of the specified message into the current message.\n\n The method clears the current message and then merges the specified\n message using MergeFrom.\n\n Args:\n other_msg (Message): A message to copy into the current one.\n ' if (self is other_msg): return self.Clear() self.MergeFrom(other_msg)
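The difference between MergeFrom (previous record) and CopyFrom is easiest to see on a concrete message. Person here is a hypothetical generated class with a string field name and an int32 field id, not anything defined in this file:

    a = Person(name="alice")
    b = Person(id=7)

    a.MergeFrom(b)   # merge: a.name stays "alice", a.id becomes 7
    a.CopyFrom(b)    # copy: a is cleared first, so a.name == "" and a.id == 7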
def Clear(self): 'Clears all data that was set in the message.' raise NotImplementedError
-7,482,799,018,333,739,000
Clears all data that was set in the message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
Clear
JustinACoder/H22-GR3-UnrealAI
python
def Clear(self): raise NotImplementedError
def SetInParent(self): 'Mark this as present in the parent.\n\n This normally happens automatically when you assign a field of a\n sub-message, but sometimes you want to make the sub-message\n present while keeping it empty. If you find yourself using this,\n you may want to reconsider your design.\n ' raise NotImplementedError
1,214,768,362,767,601,200
Mark this as present in the parent. This normally happens automatically when you assign a field of a sub-message, but sometimes you want to make the sub-message present while keeping it empty. If you find yourself using this, you may want to reconsider your design.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
SetInParent
JustinACoder/H22-GR3-UnrealAI
python
def SetInParent(self): 'Mark this as present in the parent.\n\n This normally happens automatically when you assign a field of a\n sub-message, but sometimes you want to make the sub-message\n present while keeping it empty. If you find yourself using this,\n you may want to reconsider your design.\n ' raise NotImplementedError
def IsInitialized(self): 'Checks if the message is initialized.\n\n Returns:\n bool: The method returns True if the message is initialized (i.e. all of\n its required fields are set).\n ' raise NotImplementedError
-6,005,976,788,694,109,000
Checks if the message is initialized. Returns: bool: The method returns True if the message is initialized (i.e. all of its required fields are set).
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
IsInitialized
JustinACoder/H22-GR3-UnrealAI
python
def IsInitialized(self): 'Checks if the message is initialized.\n\n Returns:\n bool: The method returns True if the message is initialized (i.e. all of\n its required fields are set).\n ' raise NotImplementedError
def MergeFromString(self, serialized): 'Merges serialized protocol buffer data into this message.\n\n When we find a field in `serialized` that is already present\n in this message:\n\n - If it\'s a "repeated" field, we append to the end of our list.\n - Else, if it\'s a scalar, we overwrite our field.\n - Else, (it\'s a nonrepeated composite), we recursively merge\n into the existing composite.\n\n Args:\n serialized (bytes): Any object that allows us to call\n ``memoryview(serialized)`` to access a string of bytes using the\n buffer interface.\n\n Returns:\n int: The number of bytes read from `serialized`.\n For non-group messages, this will always be `len(serialized)`,\n but for messages which are actually groups, this will\n generally be less than `len(serialized)`, since we must\n stop when we reach an ``END_GROUP`` tag. Note that if\n we *do* stop because of an ``END_GROUP`` tag, the number\n of bytes returned does not include the bytes\n for the ``END_GROUP`` tag information.\n\n Raises:\n DecodeError: if the input cannot be parsed.\n ' raise NotImplementedError
185,471,447,270,742,600
Merges serialized protocol buffer data into this message. When we find a field in `serialized` that is already present in this message: - If it's a "repeated" field, we append to the end of our list. - Else, if it's a scalar, we overwrite our field. - Else, (it's a nonrepeated composite), we recursively merge into the existing composite. Args: serialized (bytes): Any object that allows us to call ``memoryview(serialized)`` to access a string of bytes using the buffer interface. Returns: int: The number of bytes read from `serialized`. For non-group messages, this will always be `len(serialized)`, but for messages which are actually groups, this will generally be less than `len(serialized)`, since we must stop when we reach an ``END_GROUP`` tag. Note that if we *do* stop because of an ``END_GROUP`` tag, the number of bytes returned does not include the bytes for the ``END_GROUP`` tag information. Raises: DecodeError: if the input cannot be parsed.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
MergeFromString
JustinACoder/H22-GR3-UnrealAI
python
def MergeFromString(self, serialized): 'Merges serialized protocol buffer data into this message.\n\n When we find a field in `serialized` that is already present\n in this message:\n\n - If it\'s a "repeated" field, we append to the end of our list.\n - Else, if it\'s a scalar, we overwrite our field.\n - Else, (it\'s a nonrepeated composite), we recursively merge\n into the existing composite.\n\n Args:\n serialized (bytes): Any object that allows us to call\n ``memoryview(serialized)`` to access a string of bytes using the\n buffer interface.\n\n Returns:\n int: The number of bytes read from `serialized`.\n For non-group messages, this will always be `len(serialized)`,\n but for messages which are actually groups, this will\n generally be less than `len(serialized)`, since we must\n stop when we reach an ``END_GROUP`` tag. Note that if\n we *do* stop because of an ``END_GROUP`` tag, the number\n of bytes returned does not include the bytes\n for the ``END_GROUP`` tag information.\n\n Raises:\n DecodeError: if the input cannot be parsed.\n ' raise NotImplementedError
def ParseFromString(self, serialized): 'Parse serialized protocol buffer data into this message.\n\n Like :func:`MergeFromString()`, except we clear the object first.\n ' self.Clear() return self.MergeFromString(serialized)
-5,109,456,638,149,453,000
Parse serialized protocol buffer data into this message. Like :func:`MergeFromString()`, except we clear the object first.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
ParseFromString
JustinACoder/H22-GR3-UnrealAI
python
def ParseFromString(self, serialized): 'Parse serialized protocol buffer data into this message.\n\n Like :func:`MergeFromString()`, except we clear the object first.\n ' self.Clear() return self.MergeFromString(serialized)
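A round trip through serialization and parsing, again with a hypothetical generated Person class:

    original = Person(name="alice", id=7)
    data = original.SerializeToString()    # bytes

    clone = Person()
    clone.ParseFromString(data)            # equivalent to Clear() followed by MergeFromString(data)
    assert clone == original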
def SerializeToString(self, **kwargs): "Serializes the protocol message to a binary string.\n\n Keyword Args:\n deterministic (bool): If true, requests deterministic serialization\n of the protobuf, with predictable ordering of map keys.\n\n Returns:\n A binary string representation of the message if all of the required\n fields in the message are set (i.e. the message is initialized).\n\n Raises:\n EncodeError: if the message isn't initialized (see :func:`IsInitialized`).\n " raise NotImplementedError
7,587,470,124,439,645,000
Serializes the protocol message to a binary string. Keyword Args: deterministic (bool): If true, requests deterministic serialization of the protobuf, with predictable ordering of map keys. Returns: A binary string representation of the message if all of the required fields in the message are set (i.e. the message is initialized). Raises: EncodeError: if the message isn't initialized (see :func:`IsInitialized`).
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
SerializeToString
JustinACoder/H22-GR3-UnrealAI
python
def SerializeToString(self, **kwargs): "Serializes the protocol message to a binary string.\n\n Keyword Args:\n deterministic (bool): If true, requests deterministic serialization\n of the protobuf, with predictable ordering of map keys.\n\n Returns:\n A binary string representation of the message if all of the required\n fields in the message are set (i.e. the message is initialized).\n\n Raises:\n EncodeError: if the message isn't initialized (see :func:`IsInitialized`).\n " raise NotImplementedError
def SerializePartialToString(self, **kwargs): "Serializes the protocol message to a binary string.\n\n This method is similar to SerializeToString but doesn't check if the\n message is initialized.\n\n Keyword Args:\n deterministic (bool): If true, requests deterministic serialization\n of the protobuf, with predictable ordering of map keys.\n\n Returns:\n bytes: A serialized representation of the partial message.\n " raise NotImplementedError
-5,496,072,132,301,760,000
Serializes the protocol message to a binary string. This method is similar to SerializeToString but doesn't check if the message is initialized. Keyword Args: deterministic (bool): If true, requests deterministic serialization of the protobuf, with predictable ordering of map keys. Returns: bytes: A serialized representation of the partial message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
SerializePartialToString
JustinACoder/H22-GR3-UnrealAI
python
def SerializePartialToString(self, **kwargs): "Serializes the protocol message to a binary string.\n\n This method is similar to SerializeToString but doesn't check if the\n message is initialized.\n\n Keyword Args:\n deterministic (bool): If true, requests deterministic serialization\n of the protobuf, with predictable ordering of map keys.\n\n Returns:\n bytes: A serialized representation of the partial message.\n " raise NotImplementedError
def ListFields(self): 'Returns a list of (FieldDescriptor, value) tuples for present fields.\n\n A message field is non-empty if HasField() would return true. A singular\n primitive field is non-empty if HasField() would return true in proto2 or it\n is non zero in proto3. A repeated field is non-empty if it contains at least\n one element. The fields are ordered by field number.\n\n Returns:\n list[tuple(FieldDescriptor, value)]: field descriptors and values\n for all fields in the message which are not empty. The values vary by\n field type.\n ' raise NotImplementedError
7,323,349,097,671,602,000
Returns a list of (FieldDescriptor, value) tuples for present fields. A message field is non-empty if HasField() would return true. A singular primitive field is non-empty if HasField() would return true in proto2 or it is non zero in proto3. A repeated field is non-empty if it contains at least one element. The fields are ordered by field number. Returns: list[tuple(FieldDescriptor, value)]: field descriptors and values for all fields in the message which are not empty. The values vary by field type.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
ListFields
JustinACoder/H22-GR3-UnrealAI
python
def ListFields(self): 'Returns a list of (FieldDescriptor, value) tuples for present fields.\n\n A message field is non-empty if HasField() would return true. A singular\n primitive field is non-empty if HasField() would return true in proto2 or it\n is non zero in proto3. A repeated field is non-empty if it contains at least\n one element. The fields are ordered by field number.\n\n Returns:\n list[tuple(FieldDescriptor, value)]: field descriptors and values\n for all fields in the message which are not empty. The values vary by\n field type.\n ' raise NotImplementedError
def HasField(self, field_name): 'Checks if a certain field is set for the message.\n\n For a oneof group, checks if any field inside is set. Note that if the\n field_name is not defined in the message descriptor, :exc:`ValueError` will\n be raised.\n\n Args:\n field_name (str): The name of the field to check for presence.\n\n Returns:\n bool: Whether a value has been set for the named field.\n\n Raises:\n ValueError: if the `field_name` is not a member of this message.\n ' raise NotImplementedError
221,908,891,018,159,100
Checks if a certain field is set for the message. For a oneof group, checks if any field inside is set. Note that if the field_name is not defined in the message descriptor, :exc:`ValueError` will be raised. Args: field_name (str): The name of the field to check for presence. Returns: bool: Whether a value has been set for the named field. Raises: ValueError: if the `field_name` is not a member of this message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
HasField
JustinACoder/H22-GR3-UnrealAI
python
def HasField(self, field_name): 'Checks if a certain field is set for the message.\n\n For a oneof group, checks if any field inside is set. Note that if the\n field_name is not defined in the message descriptor, :exc:`ValueError` will\n be raised.\n\n Args:\n field_name (str): The name of the field to check for presence.\n\n Returns:\n bool: Whether a value has been set for the named field.\n\n Raises:\n ValueError: if the `field_name` is not a member of this message.\n ' raise NotImplementedError
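Presence checks as described above, on a hypothetical proto2-style Person message with an optional string field name:

    msg = Person()
    assert not msg.HasField("name")

    msg.name = "alice"
    assert msg.HasField("name")

    msg.ClearField("name")                 # ClearField is the next record
    assert not msg.HasField("name")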
def ClearField(self, field_name): 'Clears the contents of a given field.\n\n Inside a oneof group, clears the field set. If the name neither refers to a\n defined field or oneof group, :exc:`ValueError` is raised.\n\n Args:\n field_name (str): The name of the field to check for presence.\n\n Raises:\n ValueError: if the `field_name` is not a member of this message.\n ' raise NotImplementedError
-4,180,667,636,435,284,500
Clears the contents of a given field. Inside a oneof group, clears the field set. If the name neither refers to a defined field or oneof group, :exc:`ValueError` is raised. Args: field_name (str): The name of the field to check for presence. Raises: ValueError: if the `field_name` is not a member of this message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
ClearField
JustinACoder/H22-GR3-UnrealAI
python
def ClearField(self, field_name): 'Clears the contents of a given field.\n\n Inside a oneof group, clears the field set. If the name neither refers to a\n defined field or oneof group, :exc:`ValueError` is raised.\n\n Args:\n field_name (str): The name of the field to check for presence.\n\n Raises:\n ValueError: if the `field_name` is not a member of this message.\n ' raise NotImplementedError
def WhichOneof(self, oneof_group): 'Returns the name of the field that is set inside a oneof group.\n\n If no field is set, returns None.\n\n Args:\n oneof_group (str): the name of the oneof group to check.\n\n Returns:\n str or None: The name of the group that is set, or None.\n\n Raises:\n ValueError: no group with the given name exists\n ' raise NotImplementedError
7,706,930,664,240,627,000
Returns the name of the field that is set inside a oneof group. If no field is set, returns None. Args: oneof_group (str): the name of the oneof group to check. Returns: str or None: The name of the group that is set, or None. Raises: ValueError: no group with the given name exists
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
WhichOneof
JustinACoder/H22-GR3-UnrealAI
python
def WhichOneof(self, oneof_group): 'Returns the name of the field that is set inside a oneof group.\n\n If no field is set, returns None.\n\n Args:\n oneof_group (str): the name of the oneof group to check.\n\n Returns:\n str or None: The name of the group that is set, or None.\n\n Raises:\n ValueError: no group with the given name exists\n ' raise NotImplementedError
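WhichOneof is easiest to illustrate with a hypothetical message declaring oneof contact { string email = 1; string phone = 2; }:

    msg = Contact()
    assert msg.WhichOneof("contact") is None     # no member set yet

    msg.email = "a@example.com"
    assert msg.WhichOneof("contact") == "email"

    msg.phone = "555-0100"                       # setting another member switches the active field
    assert msg.WhichOneof("contact") == "phone"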
def HasExtension(self, extension_handle): 'Checks if a certain extension is present for this message.\n\n Extensions are retrieved using the :attr:`Extensions` mapping (if present).\n\n Args:\n extension_handle: The handle for the extension to check.\n\n Returns:\n bool: Whether the extension is present for this message.\n\n Raises:\n KeyError: if the extension is repeated. Similar to repeated fields,\n there is no separate notion of presence: a "not present" repeated\n extension is an empty list.\n ' raise NotImplementedError
4,907,608,339,312,964,000
Checks if a certain extension is present for this message. Extensions are retrieved using the :attr:`Extensions` mapping (if present). Args: extension_handle: The handle for the extension to check. Returns: bool: Whether the extension is present for this message. Raises: KeyError: if the extension is repeated. Similar to repeated fields, there is no separate notion of presence: a "not present" repeated extension is an empty list.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
HasExtension
JustinACoder/H22-GR3-UnrealAI
python
def HasExtension(self, extension_handle): 'Checks if a certain extension is present for this message.\n\n Extensions are retrieved using the :attr:`Extensions` mapping (if present).\n\n Args:\n extension_handle: The handle for the extension to check.\n\n Returns:\n bool: Whether the extension is present for this message.\n\n Raises:\n KeyError: if the extension is repeated. Similar to repeated fields,\n there is no separate notion of presence: a "not present" repeated\n extension is an empty list.\n ' raise NotImplementedError
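Extensions are read and written through the Extensions mapping on the message; my_ext below stands for an extension handle generated from a hypothetical proto2 definition such as extend Person { optional int32 my_ext = 100; }:

    msg = Person()
    msg.Extensions[my_ext] = 42
    assert msg.HasExtension(my_ext)

    msg.ClearExtension(my_ext)                   # ClearExtension is the next record
    assert not msg.HasExtension(my_ext)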
def ClearExtension(self, extension_handle): 'Clears the contents of a given extension.\n\n Args:\n extension_handle: The handle for the extension to clear.\n ' raise NotImplementedError
-8,967,653,825,512,342,000
Clears the contents of a given extension. Args: extension_handle: The handle for the extension to clear.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
ClearExtension
JustinACoder/H22-GR3-UnrealAI
python
def ClearExtension(self, extension_handle): 'Clears the contents of a given extension.\n\n Args:\n extension_handle: The handle for the extension to clear.\n ' raise NotImplementedError
def UnknownFields(self): 'Returns the UnknownFieldSet.\n\n Returns:\n UnknownFieldSet: The unknown fields stored in this message.\n ' raise NotImplementedError
5,739,973,485,065,129,000
Returns the UnknownFieldSet. Returns: UnknownFieldSet: The unknown fields stored in this message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
UnknownFields
JustinACoder/H22-GR3-UnrealAI
python
def UnknownFields(self): 'Returns the UnknownFieldSet.\n\n Returns:\n UnknownFieldSet: The unknown fields stored in this message.\n ' raise NotImplementedError
def DiscardUnknownFields(self): 'Clears all fields in the :class:`UnknownFieldSet`.\n\n This operation is recursive for nested message.\n ' raise NotImplementedError
2,413,369,931,560,882,700
Clears all fields in the :class:`UnknownFieldSet`. This operation is recursive for nested message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
DiscardUnknownFields
JustinACoder/H22-GR3-UnrealAI
python
def DiscardUnknownFields(self): 'Clears all fields in the :class:`UnknownFieldSet`.\n\n This operation is recursive for nested message.\n ' raise NotImplementedError
def ByteSize(self): 'Returns the serialized size of this message.\n\n Recursively calls ByteSize() on all contained messages.\n\n Returns:\n int: The number of bytes required to serialize this message.\n ' raise NotImplementedError
-7,189,590,014,356,224,000
Returns the serialized size of this message. Recursively calls ByteSize() on all contained messages. Returns: int: The number of bytes required to serialize this message.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
ByteSize
JustinACoder/H22-GR3-UnrealAI
python
def ByteSize(self): 'Returns the serialized size of this message.\n\n Recursively calls ByteSize() on all contained messages.\n\n Returns:\n int: The number of bytes required to serialize this message.\n ' raise NotImplementedError
def _SetListener(self, message_listener): 'Internal method used by the protocol message implementation.\n Clients should not call this directly.\n\n Sets a listener that this message will call on certain state transitions.\n\n The purpose of this method is to register back-edges from children to\n parents at runtime, for the purpose of setting "has" bits and\n byte-size-dirty bits in the parent and ancestor objects whenever a child or\n descendant object is modified.\n\n If the client wants to disconnect this Message from the object tree, she\n explicitly sets callback to None.\n\n If message_listener is None, unregisters any existing listener. Otherwise,\n message_listener must implement the MessageListener interface in\n internal/message_listener.py, and we discard any listener registered\n via a previous _SetListener() call.\n ' raise NotImplementedError
2,148,599,048,241,910,500
Internal method used by the protocol message implementation. Clients should not call this directly. Sets a listener that this message will call on certain state transitions. The purpose of this method is to register back-edges from children to parents at runtime, for the purpose of setting "has" bits and byte-size-dirty bits in the parent and ancestor objects whenever a child or descendant object is modified. If the client wants to disconnect this Message from the object tree, she explicitly sets callback to None. If message_listener is None, unregisters any existing listener. Otherwise, message_listener must implement the MessageListener interface in internal/message_listener.py, and we discard any listener registered via a previous _SetListener() call.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
_SetListener
JustinACoder/H22-GR3-UnrealAI
python
def _SetListener(self, message_listener): 'Internal method used by the protocol message implementation.\n Clients should not call this directly.\n\n Sets a listener that this message will call on certain state transitions.\n\n The purpose of this method is to register back-edges from children to\n parents at runtime, for the purpose of setting "has" bits and\n byte-size-dirty bits in the parent and ancestor objects whenever a child or\n descendant object is modified.\n\n If the client wants to disconnect this Message from the object tree, she\n explicitly sets callback to None.\n\n If message_listener is None, unregisters any existing listener. Otherwise,\n message_listener must implement the MessageListener interface in\n internal/message_listener.py, and we discard any listener registered\n via a previous _SetListener() call.\n ' raise NotImplementedError
def __getstate__(self): 'Support the pickle protocol.' return dict(serialized=self.SerializePartialToString())
5,659,051,950,600,202,000
Support the pickle protocol.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
__getstate__
JustinACoder/H22-GR3-UnrealAI
python
def __getstate__(self): return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state): 'Support the pickle protocol.' self.__init__() serialized = state['serialized'] if (not isinstance(serialized, bytes)): serialized = serialized.encode('latin1') self.ParseFromString(serialized)
-2,837,338,939,880,771,000
Support the pickle protocol.
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py
__setstate__
JustinACoder/H22-GR3-UnrealAI
python
def __setstate__(self, state): self.__init__() serialized = state['serialized'] if (not isinstance(serialized, bytes)): serialized = serialized.encode('latin1') self.ParseFromString(serialized)
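The two preceding records (__getstate__ and __setstate__) are the pickle hooks of generated protobuf messages. A minimal round-trip sketch, assuming only that the standard well-known type google.protobuf.timestamp_pb2 is installed (it is not part of the records above):

import pickle
from google.protobuf import timestamp_pb2

# Any generated message behaves the same; Timestamp is just a convenient stand-in.
msg = timestamp_pb2.Timestamp(seconds=1565448720, nanos=500)
data = pickle.dumps(msg)        # __getstate__ -> SerializePartialToString()
restored = pickle.loads(data)   # __setstate__ -> ParseFromString()
assert restored == msg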
def train(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((MSE + Reg)) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((MSE_test + Reg_test)) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
-7,086,159,953,085,371,000
[summary] Args: model (DeepMoD): [description] data (torch.Tensor): [description] target (torch.Tensor): [description] optimizer ([type]): [description] sparsity_scheduler ([type]): [description] log_dir (Optional[str], optional): [description]. Defaults to None. max_iterations (int, optional): [description]. Defaults to 10000.
src/multitaskpinn/training/training.py
train
GJBoth/MultiTaskPINN
python
def train(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((MSE + Reg)) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((MSE_test + Reg_test)) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
def train_multitask(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((((torch.exp((- model.s[:, 0])) * MSE) + (torch.exp((- model.s[:, 1])) * Reg)) + torch.sum(model.s))) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((((torch.exp((- model.s[:, 0])) * MSE_test) + (torch.exp((- model.s[:, 1])) * Reg_test)) + torch.sum(model.s))) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
-735,518,995,854,886,700
[summary] Args: model (DeepMoD): [description] data (torch.Tensor): [description] target (torch.Tensor): [description] optimizer ([type]): [description] sparsity_scheduler ([type]): [description] log_dir (Optional[str], optional): [description]. Defaults to None. max_iterations (int, optional): [description]. Defaults to 10000.
src/multitaskpinn/training/training.py
train_multitask
GJBoth/MultiTaskPINN
python
def train_multitask(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((((torch.exp((- model.s[:, 0])) * MSE) + (torch.exp((- model.s[:, 1])) * Reg)) + torch.sum(model.s))) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((((torch.exp((- model.s[:, 0])) * MSE_test) + (torch.exp((- model.s[:, 1])) * Reg_test)) + torch.sum(model.s))) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
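train_multitask weights the data and regression losses with learned log-variances model.s, i.e. loss = exp(-s[:, 0])*MSE + exp(-s[:, 1])*Reg + sum(s). A self-contained sketch of just that weighting; the tensor values below are made up for illustration and are not taken from the repository:

import torch

MSE = torch.tensor([0.04])                  # dummy data loss
Reg = torch.tensor([0.01])                  # dummy regression loss
s = torch.zeros(1, 2, requires_grad=True)   # learned log-variances, one row per output
loss = torch.sum(torch.exp(-s[:, 0]) * MSE + torch.exp(-s[:, 1]) * Reg + torch.sum(s))
loss.backward()                             # gradients flow into s as well as the network
print(loss.item(), s.grad)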
def train_multitask_capped(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) cutoff = torch.tensor(15.0).to(target.device) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2), dim=0) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) s_capped = torch.min(torch.max(model.s, (- cutoff)), cutoff) loss = torch.sum((((torch.exp((- s_capped[:, 0])) * MSE) + (torch.exp((- s_capped[:, 1])) * Reg)) + torch.sum(s_capped))) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((((torch.exp((- s_capped[:, 0])) * MSE_test) + (torch.exp((- s_capped[:, 1])) * Reg_test)) + torch.sum(s_capped))) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
3,808,070,609,708,569,000
[summary] Args: model (DeepMoD): [description] data (torch.Tensor): [description] target (torch.Tensor): [description] optimizer ([type]): [description] sparsity_scheduler ([type]): [description] log_dir (Optional[str], optional): [description]. Defaults to None. max_iterations (int, optional): [description]. Defaults to 10000.
src/multitaskpinn/training/training.py
train_multitask_capped
GJBoth/MultiTaskPINN
python
def train_multitask_capped(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) cutoff = torch.tensor(15.0).to(target.device) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2), dim=0) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) s_capped = torch.min(torch.max(model.s, (- cutoff)), cutoff) loss = torch.sum((((torch.exp((- s_capped[:, 0])) * MSE) + (torch.exp((- s_capped[:, 1])) * Reg)) + torch.sum(s_capped))) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = torch.sum((((torch.exp((- s_capped[:, 0])) * MSE_test) + (torch.exp((- s_capped[:, 1])) * Reg_test)) + torch.sum(s_capped))) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s) if ((iteration % write_iterations) == 0): if (test == 'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
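train_multitask_capped applies the same weighting but first clamps the log-variances to [-15, 15]. The clamp in isolation, on a made-up tensor:

import torch

cutoff = torch.tensor(15.0)
s = torch.tensor([[-20.0, 3.0]])                     # made-up log-variances
s_capped = torch.min(torch.max(s, -cutoff), cutoff)  # same effect as torch.clamp(s, -15, 15)
print(s_capped)                                      # tensor([[-15.,   3.]])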
def train_gradnorm(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, alpha, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.cat([torch.mean(((dt - (theta @ coeff_vector)) ** 2), dim=0) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) task_loss = (torch.exp(model.weights) * torch.stack((MSE, Reg), axis=1)).flatten() loss = torch.sum(task_loss) if (iteration == 0): ini_loss = task_loss.data if torch.any((task_loss.data > ini_loss)): ini_loss[(task_loss.data > ini_loss)] = task_loss.data[(task_loss.data > ini_loss)] optimizer.zero_grad() loss.backward(retain_graph=True) model.weights.grad.data = (model.weights.grad.data * 0.0) G = torch.tensor([torch.norm(torch.autograd.grad(loss_i, list(model.parameters())[(- 2)], retain_graph=True, create_graph=True)[0], 2) for loss_i in task_loss]).to(data.device) G_mean = torch.mean(G) rel_loss = (task_loss / ini_loss) inv_train_rate = (rel_loss / torch.mean(rel_loss)) grad_norm_loss = torch.sum(torch.abs((G - (G_mean * (inv_train_rate ** alpha))))) model.weights.grad = torch.autograd.grad(grad_norm_loss, model.weights)[0] optimizer.step() normalize_coeff = (task_loss.shape[0] / torch.sum(model.weights)) model.weights.data = torch.log((torch.exp(model.weights.data) * normalize_coeff)) if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = (model.weights @ torch.stack((MSE, Reg), axis=0)) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, w=model.weights) if ((iteration % write_iterations) == 0): if (test == 
'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
6,851,036,863,452,085,000
[summary] Args: model (DeepMoD): [description] data (torch.Tensor): [description] target (torch.Tensor): [description] optimizer ([type]): [description] sparsity_scheduler ([type]): [description] log_dir (Optional[str], optional): [description]. Defaults to None. max_iterations (int, optional): [description]. Defaults to 10000.
src/multitaskpinn/training/training.py
train_gradnorm
GJBoth/MultiTaskPINN
python
def train_gradnorm(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, sparsity_scheduler, alpha, test='mse', split: float=0.8, log_dir: Optional[str]=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: '[summary]\n\n Args:\n model (DeepMoD): [description]\n data (torch.Tensor): [description]\n target (torch.Tensor): [description]\n optimizer ([type]): [description]\n sparsity_scheduler ([type]): [description]\n log_dir (Optional[str], optional): [description]. Defaults to None.\n max_iterations (int, optional): [description]. Defaults to 10000.\n ' start_time = time.time() board = Tensorboard(log_dir) n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) convergence = Convergence(**convergence_kwargs) print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |') for iteration in np.arange(0, (max_iterations + 1)): (prediction, time_derivs, thetas) = model(data_train) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.cat([torch.mean(((dt - (theta @ coeff_vector)) ** 2), dim=0) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) task_loss = (torch.exp(model.weights) * torch.stack((MSE, Reg), axis=1)).flatten() loss = torch.sum(task_loss) if (iteration == 0): ini_loss = task_loss.data if torch.any((task_loss.data > ini_loss)): ini_loss[(task_loss.data > ini_loss)] = task_loss.data[(task_loss.data > ini_loss)] optimizer.zero_grad() loss.backward(retain_graph=True) model.weights.grad.data = (model.weights.grad.data * 0.0) G = torch.tensor([torch.norm(torch.autograd.grad(loss_i, list(model.parameters())[(- 2)], retain_graph=True, create_graph=True)[0], 2) for loss_i in task_loss]).to(data.device) G_mean = torch.mean(G) rel_loss = (task_loss / ini_loss) inv_train_rate = (rel_loss / torch.mean(rel_loss)) grad_norm_loss = torch.sum(torch.abs((G - (G_mean * (inv_train_rate ** alpha))))) model.weights.grad = torch.autograd.grad(grad_norm_loss, model.weights)[0] optimizer.step() normalize_coeff = (task_loss.shape[0] / torch.sum(model.weights)) model.weights.data = torch.log((torch.exp(model.weights.data) * normalize_coeff)) if ((iteration % write_iterations) == 0): (prediction_test, coordinates) = model.func_approx(data_test) (time_derivs_test, thetas_test) = model.library((prediction_test, coordinates)) with torch.no_grad(): MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) Reg_test = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))]) loss_test = (model.weights @ torch.stack((MSE, Reg), axis=0)) _ = model.sparse_estimator(thetas, time_derivs) estimator_coeff_vectors = model.estimator_coeffs() l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0) progress(iteration, start_time, max_iterations, loss.item(), torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item()) board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, w=model.weights) if ((iteration % write_iterations) == 0): if (test == 
'mse'): sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) else: sparsity_scheduler(iteration, loss_test, model, optimizer) if (sparsity_scheduler.apply_sparsity is True): with torch.no_grad(): model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) sparsity_scheduler.reset() convergence(iteration, torch.sum(l1_norm)) if (convergence.converged is True): print('Algorithm converged. Stopping training.') break board.close() if (log_dir is None): path = 'model.pt' else: path = join(log_dir, 'model.pt') torch.save(model.state_dict(), path)
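train_gradnorm balances the two task losses with a GradNorm-style objective: the per-task gradient norms G at a shared layer are pulled toward G_mean * r**alpha, where r is the inverse training rate. The target computation alone, on made-up numbers (no network involved):

import torch

alpha = 0.12                               # made-up balancing strength
task_loss = torch.tensor([0.9, 0.3])       # made-up current weighted task losses
ini_loss = torch.tensor([1.0, 1.0])        # losses recorded at iteration 0
G = torch.tensor([2.0, 0.5])               # made-up per-task gradient norms
rel_loss = task_loss / ini_loss
inv_train_rate = rel_loss / torch.mean(rel_loss)
grad_norm_loss = torch.sum(torch.abs(G - torch.mean(G) * inv_train_rate ** alpha))
print(grad_norm_loss)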
def train_SBL(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, extra_params, sparsity_scheduler, split=0.8, exp_ID: str=None, log_dir: str=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: 'Trains the DeepMoD model. This function automatically splits the data set in a train and test set. \n\n Args:\n model (DeepMoD): A DeepMoD object.\n data (torch.Tensor): Tensor of shape (n_samples x (n_spatial + 1)) containing the coordinates, first column should be the time coordinate.\n target (torch.Tensor): Tensor of shape (n_samples x n_features) containing the target data.\n optimizer ([type]): Pytorch optimizer.\n sparsity_scheduler ([type]): Decides when to update the sparsity mask.\n split (float, optional): Fraction of the train set, by default 0.8.\n exp_ID (str, optional): Unique ID to identify tensorboard file. Not used if log_dir is given, see pytorch documentation.\n log_dir (str, optional): Directory where tensorboard file is written, by default None.\n max_iterations (int, optional): [description]. Max number of epochs , by default 10000.\n write_iterations (int, optional): [description]. Sets how often data is written to tensorboard and checks train loss , by default 25.\n ' logger = Logger(exp_ID, log_dir) sparsity_scheduler.path = logger.log_dir (t, a, l) = extra_params n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) M = 12 N = data_train.shape[0] threshold = 10000.0 convergence = Convergence(**convergence_kwargs) for iteration in torch.arange(0, max_iterations): (prediction, time_derivs, thetas) = model(data_train) tau_ = torch.exp(t) alpha_ = torch.min(torch.exp(a), torch.tensor(100000000.0, dtype=torch.float32)) lambda_ = torch.min(torch.exp(l), torch.tensor(20000.0, dtype=torch.float32)) y = time_derivs[0] X = (thetas[0] / torch.norm(thetas[0], dim=0, keepdim=True)) p_MSE = ((N / 2) * (((tau_ * torch.mean(((prediction - target_train) ** 2), dim=0)) - t) + np.log((2 * np.pi)))) A = (torch.diag(lambda_) + ((alpha_ * X.T) @ X)) mn = ((lambda_ < threshold)[:, None] * (((alpha_ * torch.inverse(A)) @ X.T) @ y)) E = ((alpha_ * torch.sum(((y - (X @ mn)) ** 2))) + ((mn.T @ torch.diag(lambda_)) @ mn)) p_reg = ((1 / 2) * (((E + torch.sum(torch.log(torch.diag(A)[(lambda_ < threshold)]))) - (torch.sum(l[(lambda_ < threshold)]) + (N * a))) - (N * np.log((2 * np.pi))))) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((p_MSE + p_reg)) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): with torch.no_grad(): prediction_test = model.func_approx(data_test)[0] MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) _ = model.sparse_estimator(thetas, time_derivs) logger(iteration, loss, MSE, Reg, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), model.estimator_coeffs(), MSE_test=MSE_test, p_MSE=p_MSE, p_reg=p_reg, tau=tau_, alpha=alpha_, lambda_=lambda_, mn=mn) update_sparsity = sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) if update_sparsity: model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) l1_norm = 
torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1))) converged = convergence(iteration, l1_norm) if converged: break logger.close(model)
-1,715,964,040,550,835,700
Trains the DeepMoD model. This function automatically splits the data set in a train and test set. Args: model (DeepMoD): A DeepMoD object. data (torch.Tensor): Tensor of shape (n_samples x (n_spatial + 1)) containing the coordinates, first column should be the time coordinate. target (torch.Tensor): Tensor of shape (n_samples x n_features) containing the target data. optimizer ([type]): Pytorch optimizer. sparsity_scheduler ([type]): Decides when to update the sparsity mask. split (float, optional): Fraction of the train set, by default 0.8. exp_ID (str, optional): Unique ID to identify tensorboard file. Not used if log_dir is given, see pytorch documentation. log_dir (str, optional): Directory where tensorboard file is written, by default None. max_iterations (int, optional): [description]. Max number of epochs , by default 10000. write_iterations (int, optional): [description]. Sets how often data is written to tensorboard and checks train loss , by default 25.
src/multitaskpinn/training/training.py
train_SBL
GJBoth/MultiTaskPINN
python
def train_SBL(model: DeepMoD, data: torch.Tensor, target: torch.Tensor, optimizer, extra_params, sparsity_scheduler, split=0.8, exp_ID: str=None, log_dir: str=None, max_iterations: int=10000, write_iterations: int=25, **convergence_kwargs) -> None: 'Trains the DeepMoD model. This function automatically splits the data set in a train and test set. \n\n Args:\n model (DeepMoD): A DeepMoD object.\n data (torch.Tensor): Tensor of shape (n_samples x (n_spatial + 1)) containing the coordinates, first column should be the time coordinate.\n target (torch.Tensor): Tensor of shape (n_samples x n_features) containing the target data.\n optimizer ([type]): Pytorch optimizer.\n sparsity_scheduler ([type]): Decides when to update the sparsity mask.\n split (float, optional): Fraction of the train set, by default 0.8.\n exp_ID (str, optional): Unique ID to identify tensorboard file. Not used if log_dir is given, see pytorch documentation.\n log_dir (str, optional): Directory where tensorboard file is written, by default None.\n max_iterations (int, optional): [description]. Max number of epochs , by default 10000.\n write_iterations (int, optional): [description]. Sets how often data is written to tensorboard and checks train loss , by default 25.\n ' logger = Logger(exp_ID, log_dir) sparsity_scheduler.path = logger.log_dir (t, a, l) = extra_params n_train = int((split * data.shape[0])) n_test = (data.shape[0] - n_train) (data_train, data_test) = torch.split(data, [n_train, n_test], dim=0) (target_train, target_test) = torch.split(target, [n_train, n_test], dim=0) M = 12 N = data_train.shape[0] threshold = 10000.0 convergence = Convergence(**convergence_kwargs) for iteration in torch.arange(0, max_iterations): (prediction, time_derivs, thetas) = model(data_train) tau_ = torch.exp(t) alpha_ = torch.min(torch.exp(a), torch.tensor(100000000.0, dtype=torch.float32)) lambda_ = torch.min(torch.exp(l), torch.tensor(20000.0, dtype=torch.float32)) y = time_derivs[0] X = (thetas[0] / torch.norm(thetas[0], dim=0, keepdim=True)) p_MSE = ((N / 2) * (((tau_ * torch.mean(((prediction - target_train) ** 2), dim=0)) - t) + np.log((2 * np.pi)))) A = (torch.diag(lambda_) + ((alpha_ * X.T) @ X)) mn = ((lambda_ < threshold)[:, None] * (((alpha_ * torch.inverse(A)) @ X.T) @ y)) E = ((alpha_ * torch.sum(((y - (X @ mn)) ** 2))) + ((mn.T @ torch.diag(lambda_)) @ mn)) p_reg = ((1 / 2) * (((E + torch.sum(torch.log(torch.diag(A)[(lambda_ < threshold)]))) - (torch.sum(l[(lambda_ < threshold)]) + (N * a))) - (N * np.log((2 * np.pi))))) MSE = torch.mean(((prediction - target_train) ** 2), dim=0) Reg = torch.stack([torch.mean(((dt - (theta @ coeff_vector)) ** 2)) for (dt, theta, coeff_vector) in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))]) loss = torch.sum((p_MSE + p_reg)) optimizer.zero_grad() loss.backward() optimizer.step() if ((iteration % write_iterations) == 0): with torch.no_grad(): prediction_test = model.func_approx(data_test)[0] MSE_test = torch.mean(((prediction_test - target_test) ** 2), dim=0) _ = model.sparse_estimator(thetas, time_derivs) logger(iteration, loss, MSE, Reg, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), model.estimator_coeffs(), MSE_test=MSE_test, p_MSE=p_MSE, p_reg=p_reg, tau=tau_, alpha=alpha_, lambda_=lambda_, mn=mn) update_sparsity = sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer) if update_sparsity: model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs) l1_norm = 
torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1))) converged = convergence(iteration, l1_norm) if converged: break logger.close(model)
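train_SBL computes a sparse-Bayesian-learning style posterior mean inside its loop: mn = alpha * A^{-1} X^T y with A = diag(lambda) + alpha X^T X, masked wherever lambda exceeds the threshold. That linear-algebra step alone, on made-up tensors:

import torch

alpha_ = torch.tensor(10.0)
lambda_ = torch.tensor([1.0, 50.0, 1e6])    # made-up per-term precisions
threshold = 1e4
X = torch.randn(100, 3)
X = X / torch.norm(X, dim=0, keepdim=True)  # same column normalisation as in the loop
y = torch.randn(100, 1)
A = torch.diag(lambda_) + (alpha_ * X.T) @ X
mn = (lambda_ < threshold)[:, None] * ((alpha_ * torch.inverse(A)) @ X.T @ y)
print(mn)                                   # third coefficient is masked to zero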
def decoderawtransaction_asm_sighashtype(self): 'Test decoding scripts via RPC command "decoderawtransaction".\n\n This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.\n ' tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = CTransaction() txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) tx = 
'01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:((72 * 2) + 4)] signature = push_signature[2:] der_signature = signature[:(- 2)] signature_sighash_decoded = (der_signature + '[ALL]') signature_2 = (der_signature + '82') push_signature_2 = ('48' + signature_2) signature_2_sighash_decoded = (der_signature + '[NONE|ANYONECANPAY]') txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes((('00' + push_signature) + push_signature_2)) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(((('0 ' + signature_sighash_decoded) + ' ') + signature_2_sighash_decoded), rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
3,089,507,438,065,855,000
Test decoding scripts via RPC command "decoderawtransaction". This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
test/functional/rpc_decodescript.py
decoderawtransaction_asm_sighashtype
Black-NET/erexcoin-source
python
def decoderawtransaction_asm_sighashtype(self): 'Test decoding scripts via RPC command "decoderawtransaction".\n\n This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.\n ' tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = CTransaction() txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) tx = 
'01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:((72 * 2) + 4)] signature = push_signature[2:] der_signature = signature[:(- 2)] signature_sighash_decoded = (der_signature + '[ALL]') signature_2 = (der_signature + '82') push_signature_2 = ('48' + signature_2) signature_2_sighash_decoded = (der_signature + '[NONE|ANYONECANPAY]') txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes((('00' + push_signature) + push_signature_2)) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(((('0 ' + signature_sighash_decoded) + ' ') + signature_2_sighash_decoded), rpc_result['vin'][0]['scriptSig']['asm']) txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
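The test above strips the trailing sighash byte from each DER signature push and expects decoderawtransaction to render it as '[ALL]' or, for 0x82, '[NONE|ANYONECANPAY]'. A tiny standalone helper reproducing those labels; it is illustrative only and not part of the test framework:

SIGHASH_NAMES = {0x01: 'ALL', 0x02: 'NONE', 0x03: 'SINGLE'}
ANYONECANPAY = 0x80

def sighash_label(byte):
    # Mirrors the '[ALL]' / '[NONE|ANYONECANPAY]' suffixes asserted in the test.
    base = SIGHASH_NAMES[byte & 0x1f]
    return ('[' + base + '|ANYONECANPAY]') if (byte & ANYONECANPAY) else ('[' + base + ']')

assert sighash_label(0x01) == '[ALL]'
assert sighash_label(0x82) == '[NONE|ANYONECANPAY]'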
def create_dynamic_ordering_constraint(index: int) -> str: '\n Creates a valid AMPL constraint of the form:\n [LaTex]: $start\\_time_j+1 >= start\\_time_j + duration_j$, $\x0corall j \\in BATCH$\n :param index: j index where the current constraint should start\n :return: single AMPL JIT constraint as a string\n ' i = str(index) i_next = str((index + 1)) constraint_name = f'ordering_{i_next}_{i}' return f's.t. {constraint_name}: start_time[{i_next}] >= start_time[{i}] + duration[{i}];'
6,747,510,227,925,415,000
Creates a valid AMPL constraint of the form: [LaTex]: $start\_time_{j+1} >= start\_time_j + duration_j$, $\forall j \in BATCH$ :param index: j index where the current constraint should start :return: single AMPL JIT constraint as a string
ampljit/utils.py
create_dynamic_ordering_constraint
jkomyno/amplrestapi
python
def create_dynamic_ordering_constraint(index: int) -> str: '\n Creates a valid AMPL constraint of the form:\n [LaTex]: $start\\_time_j+1 >= start\\_time_j + duration_j$, $\x0corall j \\in BATCH$\n :param index: j index where the current constraint should start\n :return: single AMPL JIT constraint as a string\n ' i = str(index) i_next = str((index + 1)) constraint_name = f'ordering_{i_next}_{i}' return f's.t. {constraint_name}: start_time[{i_next}] >= start_time[{i}] + duration[{i}];'
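A quick usage check for create_dynamic_ordering_constraint; the import assumes the path field above (ampljit/utils.py) resolves to the module ampljit.utils:

from ampljit.utils import create_dynamic_ordering_constraint

print(create_dynamic_ordering_constraint(3))
# s.t. ordering_4_3: start_time[4] >= start_time[3] + duration[3];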
def dict_to_list(obj: dict) -> list: "\n Converts a dictionary to a list, extracting the values of the dictionary.\n The list is sorted according to the dict's keys ascendant order.\n The given dictionary should always have the same numeric keys as the result of create_batch_dictionary().\n :param obj: the dictionary to convert which should have numeric keys\n :return: the list of values in the dictionary\n " return list(obj.values())
9,012,645,828,889,174,000
Converts a dictionary to a list, extracting the values of the dictionary. The list is sorted according to the dict's keys ascendant order. The given dictionary should always have the same numeric keys as the result of create_batch_dictionary(). :param obj: the dictionary to convert which should have numeric keys :return: the list of values in the dictionary
ampljit/utils.py
dict_to_list
jkomyno/amplrestapi
python
def dict_to_list(obj: dict) -> list: "\n Converts a dictionary to a list, extracting the values of the dictionary.\n The list is sorted according to the dict's keys ascendant order.\n The given dictionary should always have the same numeric keys as the result of create_batch_dictionary().\n :param obj: the dictionary to convert which should have numeric keys\n :return: the list of values in the dictionary\n " return list(obj.values())
def strings_to_datetimes(str_date_lst: list, datetime_format: str) -> list: '\n Converts a list of strings into a list of datetime objects\n :param str_date_lst: list of string objects compatible with the ISO8601 format\n :param datetime_format: format of the datetime\n :return: list of datetime objects equivalent to the given str_date_lst\n ' return [datetime.strptime(d, datetime_format) for d in str_date_lst]
-3,893,893,176,523,418,600
Converts a list of strings into a list of datetime objects :param str_date_lst: list of string objects compatible with the ISO8601 format :param datetime_format: format of the datetime :return: list of datetime objects equivalent to the given str_date_lst
ampljit/utils.py
strings_to_datetimes
jkomyno/amplrestapi
python
def strings_to_datetimes(str_date_lst: list, datetime_format: str) -> list: '\n Converts a list of strings into a list of datetime objects\n :param str_date_lst: list of string objects compatible with the ISO8601 format\n :param datetime_format: format of the datetime\n :return: list of datetime objects equivalent to the given str_date_lst\n ' return [datetime.strptime(d, datetime_format) for d in str_date_lst]
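strings_to_datetimes is a thin wrapper around datetime.strptime; a short usage sketch with an assumed format string (import path assumed as above):

from ampljit.utils import strings_to_datetimes

dates = strings_to_datetimes(['2019-08-22 14:32', '2019-08-22 14:38'], '%Y-%m-%d %H:%M')
print(dates[1] - dates[0])   # 0:06:00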
def minute_timedelta(first: datetime, second: datetime) -> int: '\n Returns the difference expressed in minutes between 2 datetime objects\n :param first: datetime object that comes before second\n :param second: datetime object that comes after first\n :return: difference in minutes between second and first\n ' delta: timedelta = (second - first) return divmod(delta.total_seconds(), 60)[0]
5,728,973,006,739,140,000
Returns the difference expressed in minutes between 2 datetime objects :param first: datetime object that comes before second :param second: datetime object that comes after first :return: difference in minutes between second and first
ampljit/utils.py
minute_timedelta
jkomyno/amplrestapi
python
def minute_timedelta(first: datetime, second: datetime) -> int: '\n Returns the difference expressed in minutes between 2 datetime objects\n :param first: datetime object that comes before second\n :param second: datetime object that comes after first\n :return: difference in minutes between second and first\n ' delta: timedelta = (second - first) return divmod(delta.total_seconds(), 60)[0]
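A usage sketch for minute_timedelta. Note that divmod applied to a float returns a float quotient, so despite the int annotation the result is e.g. 6.0 rather than 6; the comparison below still passes because 6.0 == 6 in Python:

from datetime import datetime

start = datetime(2019, 8, 22, 14, 32)
end = datetime(2019, 8, 22, 14, 38)
assert minute_timedelta(first=start, second=end) == 6  # actually the float 6.0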
def minute_timedeltas_wrt_first(datetime_lst: list) -> list: "\n Converts a list of datetime objects into a list of minute time deltas with respect to the first item.\n For example, given the input datetime_lst:\n [\n '2019-08-22 14:32',\n '2019-08-22 14:38',\n '2019-08-22 14:42',\n '2019-08-22 14:52',\n '2019-08-22 14:57'\n ],\n the result would be:\n [32, 38, 42, 52, 57]\n\n :param datetime_lst: list of datetime objects\n :return: minute time deltas with respect to the first item of datetime_lst\n " first_datetime: datetime = datetime_lst[0] partial_deltas = [minute_timedelta(first=first_datetime, second=v) for v in datetime_lst[1:]] first_minutes = first_datetime.minute return ([first_minutes] + list(map((lambda x: (x + first_minutes)), partial_deltas)))
4,508,611,415,876,424,000
Converts a list of datetime objects into a list of minute time deltas with respect to the first item. For example, given the input datetime_lst: [ '2019-08-22 14:32', '2019-08-22 14:38', '2019-08-22 14:42', '2019-08-22 14:52', '2019-08-22 14:57' ], the result would be: [32, 38, 42, 52, 57] :param datetime_lst: list of datetime objects :return: minute time deltas with respect to the first item of datetime_lst
ampljit/utils.py
minute_timedeltas_wrt_first
jkomyno/amplrestapi
python
def minute_timedeltas_wrt_first(datetime_lst: list) -> list: "\n Converts a list of datetime objects into a list of minute time deltas with respect to the first item.\n For example, given the input datetime_lst:\n [\n '2019-08-22 14:32',\n '2019-08-22 14:38',\n '2019-08-22 14:42',\n '2019-08-22 14:52',\n '2019-08-22 14:57'\n ],\n the result would be:\n [32, 38, 42, 52, 57]\n\n :param datetime_lst: list of datetime objects\n :return: minute time deltas with respect to the first item of datetime_lst\n " first_datetime: datetime = datetime_lst[0] partial_deltas = [minute_timedelta(first=first_datetime, second=v) for v in datetime_lst[1:]] first_minutes = first_datetime.minute return ([first_minutes] + list(map((lambda x: (x + first_minutes)), partial_deltas)))
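A sketch that reproduces the example from the docstring, with the datetimes constructed by hand instead of parsed from strings (the deltas after the first element come back as floats, which still compare equal to the expected ints):

from datetime import datetime

times = [datetime(2019, 8, 22, 14, m) for m in (32, 38, 42, 52, 57)]
assert minute_timedeltas_wrt_first(times) == [32, 38, 42, 52, 57]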
def set_minutes_to_datetimes(datetime_lst: list, minutes_lst: list) -> list: '\n Given a list of minutes and datetime objects, sets each amount of minutes to each datetime object with respect\n to the list index. The two lists must have the same size.\n :param datetime_lst: list of datetime objects\n :param minutes_lst: list of minutes to set to a list of datetime objects\n :return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst\n ' return [(d.replace(minute=0) + timedelta(minutes=m)) for (d, m) in zip(datetime_lst, minutes_lst)]
-1,701,486,862,418,520,600
Given a list of minutes and datetime objects, sets each amount of minutes to each datetime object with respect to the list index. The two lists must have the same size. :param datetime_lst: list of datetime objects :param minutes_lst: list of minutes to set to a list of datetime objects :return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst
ampljit/utils.py
set_minutes_to_datetimes
jkomyno/amplrestapi
python
def set_minutes_to_datetimes(datetime_lst: list, minutes_lst: list) -> list: '\n Given a list of minutes and datetime objects, sets each amount of minutes to each datetime object with respect\n to the list index. The two lists must have the same size.\n :param datetime_lst: list of datetime objects\n :param minutes_lst: list of minutes to set to a list of datetime objects\n :return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst\n ' return [(d.replace(minute=0) + timedelta(minutes=m)) for (d, m) in zip(datetime_lst, minutes_lst)]
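An illustrative sketch for set_minutes_to_datetimes. Because each datetime is first truncated to the top of its hour (d.replace(minute=0)) and then shifted by a timedelta, minute values of 60 or more simply roll into the next hour:

from datetime import datetime

base = [datetime(2019, 8, 22, 14, 0), datetime(2019, 8, 22, 14, 0)]
shifted = set_minutes_to_datetimes(base, [32, 75])
assert shifted == [datetime(2019, 8, 22, 14, 32), datetime(2019, 8, 22, 15, 15)]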
def datetimes_to_strings(datetime_lst: list, datetime_format: str) -> list: '\n Converts a list of datetime objects to strings, according to a certain datetime format.\n :param datetime_lst: list of datetime objects to convert to string\n :param datetime_format: format of the datetime\n :return: the list of datetime objects converted to strings in the given datetime format\n ' return [d.strftime(datetime_format) for d in datetime_lst]
213,313,511,449,219,460
Converts a list of datetime objects to strings, according to a certain datetime format. :param datetime_lst: list of datetime objects to convert to string :param datetime_format: format of the datetime :return: the list of datetime objects converted to strings in the given datetime format
ampljit/utils.py
datetimes_to_strings
jkomyno/amplrestapi
python
def datetimes_to_strings(datetime_lst: list, datetime_format: str) -> list: '\n Converts a list of datetime objects to strings, according to a certain datetime format.\n :param datetime_lst: list of datetime objects to convert to string\n :param datetime_format: format of the datetime\n :return: the list of datetime objects converted to strings in the given datetime format\n ' return [d.strftime(datetime_format) for d in datetime_lst]
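A round-trip sketch combining datetimes_to_strings with strings_to_datetimes; the format string is again an assumption:

fmt = '%Y-%m-%d %H:%M'
original = ['2019-08-22 14:32', '2019-08-22 14:57']
assert datetimes_to_strings(strings_to_datetimes(original, fmt), fmt) == original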
def test_len(self): ' test for bootstrap helper' for i in range(len(test_cases)): self.assertEqual(len(results[i]), test_cases[i]['len']) self.assertEqual(len(resultsInt64[i]), test_cases[i]['len'])
7,617,248,818,454,541,000
test for bootstrap helper
neaps-api/neaps_lib/bootstrap_test.py
test_len
ExpediaGroup/neaps
python
def test_len(self): ' ' for i in range(len(test_cases)): self.assertEqual(len(results[i]), test_cases[i]['len']) self.assertEqual(len(resultsInt64[i]), test_cases[i]['len'])
def test_value(self): ' docstring ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertEqual(results[i][j], test_cases[i]['mean']) self.assertEqual(resultsInt64[i][j], test_cases[i]['meanInt64']) self.assertIsInstance(results[i][j], np.float64) self.assertIsInstance(resultsInt64[i][j], np.int64)
-7,203,681,843,411,713,000
docstring
neaps-api/neaps_lib/bootstrap_test.py
test_value
ExpediaGroup/neaps
python
def test_value(self): ' ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertEqual(results[i][j], test_cases[i]['mean']) self.assertEqual(resultsInt64[i][j], test_cases[i]['meanInt64']) self.assertIsInstance(results[i][j], np.float64) self.assertIsInstance(resultsInt64[i][j], np.int64)
def test_less(self): ' docstring ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertLessEqual(results[i][j], max(test_cases[i]['sample']))
-198,653,698,443,766,720
docstring
neaps-api/neaps_lib/bootstrap_test.py
test_less
ExpediaGroup/neaps
python
def test_less(self): ' ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertLessEqual(results[i][j], max(test_cases[i]['sample']))
def test_greater(self): ' docstring ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertGreaterEqual(results[i][j], min(test_cases[i]['sample']))
9,088,840,391,431,958,000
docstring
neaps-api/neaps_lib/bootstrap_test.py
test_greater
ExpediaGroup/neaps
python
def test_greater(self): ' ' for i in range(len(test_cases)): for j in range(len(results[i])): self.assertGreaterEqual(results[i][j], min(test_cases[i]['sample']))
def __init__(self, authority=None, content=None, default_port=None, file=None, host=None, path=None, port=None, protocol=None, query=None, ref=None, user_info=None): 'URL - a model defined in Swagger' self._authority = None self._content = None self._default_port = None self._file = None self._host = None self._path = None self._port = None self._protocol = None self._query = None self._ref = None self._user_info = None self.discriminator = None if (authority is not None): self.authority = authority if (content is not None): self.content = content if (default_port is not None): self.default_port = default_port if (file is not None): self.file = file if (host is not None): self.host = host if (path is not None): self.path = path if (port is not None): self.port = port if (protocol is not None): self.protocol = protocol if (query is not None): self.query = query if (ref is not None): self.ref = ref if (user_info is not None): self.user_info = user_info
-4,837,812,409,801,342,000
URL - a model defined in Swagger
tb_rest_client/models/models_pe/url.py
__init__
CSTC-WTCB-BBRI/python_tb_rest_client
python
def __init__(self, authority=None, content=None, default_port=None, file=None, host=None, path=None, port=None, protocol=None, query=None, ref=None, user_info=None): self._authority = None self._content = None self._default_port = None self._file = None self._host = None self._path = None self._port = None self._protocol = None self._query = None self._ref = None self._user_info = None self.discriminator = None if (authority is not None): self.authority = authority if (content is not None): self.content = content if (default_port is not None): self.default_port = default_port if (file is not None): self.file = file if (host is not None): self.host = host if (path is not None): self.path = path if (port is not None): self.port = port if (protocol is not None): self.protocol = protocol if (query is not None): self.query = query if (ref is not None): self.ref = ref if (user_info is not None): self.user_info = user_info
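A minimal construction sketch for this generated Swagger model; the field values below are made up for illustration and only touch attributes whose accessors appear in this file:

url = URL(authority='example.com:443', default_port=443)
assert url.authority == 'example.com:443'
assert url.default_port == 443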
@property def authority(self): 'Gets the authority of this URL. # noqa: E501\n\n\n :return: The authority of this URL. # noqa: E501\n :rtype: str\n ' return self._authority
6,008,489,197,909,201,000
Gets the authority of this URL. # noqa: E501 :return: The authority of this URL. # noqa: E501 :rtype: str
tb_rest_client/models/models_pe/url.py
authority
CSTC-WTCB-BBRI/python_tb_rest_client
python
@property def authority(self): 'Gets the authority of this URL. # noqa: E501\n\n\n :return: The authority of this URL. # noqa: E501\n :rtype: str\n ' return self._authority
@authority.setter def authority(self, authority): 'Sets the authority of this URL.\n\n\n :param authority: The authority of this URL. # noqa: E501\n :type: str\n ' self._authority = authority
5,373,869,373,672,434,000
Sets the authority of this URL. :param authority: The authority of this URL. # noqa: E501 :type: str
tb_rest_client/models/models_pe/url.py
authority
CSTC-WTCB-BBRI/python_tb_rest_client
python
@authority.setter def authority(self, authority): 'Sets the authority of this URL.\n\n\n :param authority: The authority of this URL. # noqa: E501\n :type: str\n ' self._authority = authority
@property def content(self): 'Gets the content of this URL. # noqa: E501\n\n\n :return: The content of this URL. # noqa: E501\n :rtype: object\n ' return self._content
-5,395,410,704,641,924,000
Gets the content of this URL. # noqa: E501 :return: The content of this URL. # noqa: E501 :rtype: object
tb_rest_client/models/models_pe/url.py
content
CSTC-WTCB-BBRI/python_tb_rest_client
python
@property def content(self): 'Gets the content of this URL. # noqa: E501\n\n\n :return: The content of this URL. # noqa: E501\n :rtype: object\n ' return self._content
@content.setter def content(self, content): 'Sets the content of this URL.\n\n\n :param content: The content of this URL. # noqa: E501\n :type: object\n ' self._content = content
-3,132,387,909,026,987,500
Sets the content of this URL. :param content: The content of this URL. # noqa: E501 :type: object
tb_rest_client/models/models_pe/url.py
content
CSTC-WTCB-BBRI/python_tb_rest_client
python
@content.setter def content(self, content): 'Sets the content of this URL.\n\n\n :param content: The content of this URL. # noqa: E501\n :type: object\n ' self._content = content
@property def default_port(self): 'Gets the default_port of this URL. # noqa: E501\n\n\n :return: The default_port of this URL. # noqa: E501\n :rtype: int\n ' return self._default_port
-2,337,253,307,847,136,000
Gets the default_port of this URL. # noqa: E501 :return: The default_port of this URL. # noqa: E501 :rtype: int
tb_rest_client/models/models_pe/url.py
default_port
CSTC-WTCB-BBRI/python_tb_rest_client
python
@property def default_port(self): 'Gets the default_port of this URL. # noqa: E501\n\n\n :return: The default_port of this URL. # noqa: E501\n :rtype: int\n ' return self._default_port
@default_port.setter def default_port(self, default_port): 'Sets the default_port of this URL.\n\n\n :param default_port: The default_port of this URL. # noqa: E501\n :type: int\n ' self._default_port = default_port
1,449,772,015,788,586,500
Sets the default_port of this URL. :param default_port: The default_port of this URL. # noqa: E501 :type: int
tb_rest_client/models/models_pe/url.py
default_port
CSTC-WTCB-BBRI/python_tb_rest_client
python
@default_port.setter def default_port(self, default_port): 'Sets the default_port of this URL.\n\n\n :param default_port: The default_port of this URL. # noqa: E501\n :type: int\n ' self._default_port = default_port