repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
dwavesystems/dwave-system | dwave/embedding/chimera.py | find_biclique_embedding | def find_biclique_embedding(a, b, m, n=None, t=None, target_edges=None):
"""Find an embedding for a biclique in a Chimera graph.
Given a target :term:`Chimera` graph size, and a biclique (a bipartite graph where every
vertex in a set is connected to all vertices in the other set), attempts to find an embedding.
Args:
a (int/iterable):
Left shore of the biclique to embed. If a is an integer, generates an embedding
for a biclique with the left shore of size a labelled [0,a-1].
If a is an iterable, generates an embedding for a biclique with the left shore of size
len(a), where iterable a provides the variable labels.
b (int/iterable):
Right shore of the biclique to embed. If b is an integer, generates an embedding
for a biclique with the right shore of size b labelled [0,b-1].
If b is an iterable, generates an embedding for a biclique with the right shore of
size len(b), where iterable b provides the variable labels.
m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default=4):
Size of the shore within each Chimera tile.
target_edges (iterable[edge]):
A list of edges in the target Chimera graph. Nodes are labelled as
returned by :func:`~dwave_networkx.generators.chimera_graph`.
Returns:
tuple: A 2-tuple containing:
dict: An embedding mapping the left shore of the biclique to the Chimera lattice.
dict: An embedding mapping the right shore of the biclique to the Chimera lattice.
Examples:
This example finds an embedding for an alphanumerically labeled biclique in a single
Chimera unit cell.
>>> from dwave.embedding.chimera import find_biclique_embedding
...
>>> left, right = find_biclique_embedding(['a', 'b', 'c'], ['d', 'e'], 1, 1)
>>> print(left, right) # doctest: +SKIP
{'a': [4], 'b': [5], 'c': [6]} {'d': [0], 'e': [1]}
"""
_, anodes = a
_, bnodes = b
m, n, t, target_edges = _chimera_input(m, n, t, target_edges)
embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeBiClique(len(anodes), len(bnodes))
if not embedding:
raise ValueError("cannot find a K{},{} embedding for given Chimera lattice".format(a, b))
left, right = embedding
return dict(zip(anodes, left)), dict(zip(bnodes, right)) | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 86a1698f15ccd8b0ece0ed868ee49292d3f67f5b | https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/chimera.py#L111-L171 | train |
dwavesystems/dwave-system | dwave/embedding/chimera.py | find_grid_embedding | def find_grid_embedding(dim, m, n=None, t=4):
"""Find an embedding for a grid in a Chimera graph.
Given a target :term:`Chimera` graph size, and grid dimensions, attempts to find an embedding.
Args:
dim (iterable[int]):
Sizes of each grid dimension. Length can be between 1 and 3.
m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default=4):
Size of the shore within each Chimera tile.
Returns:
dict: An embedding mapping a grid to the Chimera lattice.
Examples:
This example finds an embedding for a 2x3 grid in a 12x12 lattice of Chimera unit cells.
>>> from dwave.embedding.chimera import find_grid_embedding
...
>>> embedding = find_grid_embedding([2, 3], m=12, n=12, t=4)
>>> embedding # doctest: +SKIP
{(0, 0): [0, 4],
(0, 1): [8, 12],
(0, 2): [16, 20],
(1, 0): [96, 100],
(1, 1): [104, 108],
(1, 2): [112, 116]}
"""
m, n, t, target_edges = _chimera_input(m, n, t, None)
indexer = dnx.generators.chimera.chimera_coordinates(m, n, t)
dim = list(dim)
num_dim = len(dim)
if num_dim == 1:
def _key(row, col, aisle): return row
dim.extend([1, 1])
elif num_dim == 2:
def _key(row, col, aisle): return row, col
dim.append(1)
elif num_dim == 3:
def _key(row, col, aisle): return row, col, aisle
else:
raise ValueError("find_grid_embedding supports between one and three dimensions")
rows, cols, aisles = dim
if rows > m or cols > n or aisles > t:
msg = ("the largest grid that find_grid_embedding can fit in a ({}, {}, {}) Chimera-lattice "
"is {}x{}x{}; given grid is {}x{}x{}").format(m, n, t, m, n, t, rows, cols, aisles)
raise ValueError(msg)
return {_key(row, col, aisle): [indexer.int((row, col, 0, aisle)), indexer.int((row, col, 1, aisle))]
for row in range(dim[0]) for col in range(dim[1]) for aisle in range(dim[2])} | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 86a1698f15ccd8b0ece0ed868ee49292d3f67f5b | https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/chimera.py#L174-L234 | train |
dwavesystems/dwave-system | dwave/system/composites/cutoffcomposite.py | CutOffComposite.sample | def sample(self, bqm, **parameters):
"""Cutoff and sample from the provided binary quadratic model.
Removes interactions smaller than a given cutoff. Isolated
variables (after the cutoff) are also removed.
Note that if the problem had isolated variables before the cutoff, they
will also be affected.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
child = self.child
cutoff = self._cutoff
cutoff_vartype = self._cutoff_vartype
comp = self._comparison
if cutoff_vartype is dimod.SPIN:
original = bqm.spin
else:
original = bqm.binary
# remove all of the interactions less than cutoff
new = type(bqm)(original.linear,
((u, v, bias)
for (u, v), bias in original.quadratic.items()
if not comp(abs(bias), cutoff)),
original.offset,
original.vartype)
# next we check for isolated qubits and remove them, we could do this as
# part of the construction but the assumption is there should not be
# a large number in the 'typical' case
isolated = [v for v in new if not new.adj[v]]
new.remove_variables_from(isolated)
if isolated and len(new) == 0:
# in this case all variables are isolated, so we just put one back
# to serve as the basis
v = isolated.pop()
new.linear[v] = original.linear[v]
# get the samples from the child sampler and put them into the original vartype
sampleset = child.sample(new, **parameters).change_vartype(bqm.vartype, inplace=True)
# we now need to add the isolated back in, in a way that minimizes
# the energy. There are lots of ways to do this but for now we'll just
# do one
if isolated:
samples, variables = _restore_isolated(sampleset, bqm, isolated)
else:
samples = sampleset.record.sample
variables = sampleset.variables
vectors = sampleset.data_vectors
vectors.pop('energy') # we're going to recalculate the energy anyway
return dimod.SampleSet.from_samples_bqm((samples, variables), bqm, **vectors) | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 86a1698f15ccd8b0ece0ed868ee49292d3f67f5b | https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/system/composites/cutoffcomposite.py#L79-L144 | train |
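Because this row carries no usage example, here is a minimal, hedged sketch of the cutoff step that `sample` performs, written only against the public dimod API (`BinaryQuadraticModel.from_ising`, `adj`, `remove_variables_from`, `ExactSolver`). The composite's constructor is not shown in this row, so the sketch applies the filter by hand instead of instantiating `CutOffComposite`; the problem values are made up for illustration.

```python
import dimod

# Toy Ising problem with one weak coupling (0.01) that falls below the cutoff.
h = {'a': -1.0, 'b': 0.5, 'c': 0.0}
J = {('a', 'b'): -2.0, ('b', 'c'): 0.01}
bqm = dimod.BinaryQuadraticModel.from_ising(h, J)

cutoff = 0.1

# Keep only interactions with |bias| >= cutoff, mirroring the filter in sample().
kept = {uv: bias for uv, bias in bqm.quadratic.items() if abs(bias) >= cutoff}
new = dimod.BinaryQuadraticModel.from_ising(dict(bqm.linear), kept)

# Drop variables left with no interactions ('c' in this toy problem),
# just as sample() removes isolated variables after the cutoff.
isolated = [v for v in new.variables if not new.adj[v]]
new.remove_variables_from(isolated)

# ExactSolver stands in for the composite's child sampler.
sampleset = dimod.ExactSolver().sample(new)
print(sampleset.first.sample)   # {'a': 1, 'b': 1}; 'c' was cut off
```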
dwavesystems/dwave-system | dwave/system/composites/cutoffcomposite.py | PolyCutOffComposite.sample_poly | def sample_poly(self, poly, **kwargs):
"""Cutoff and sample from the provided binary polynomial.
Removes interactions smaller than a given cutoff. Isolated
variables (after the cutoff) are also removed.
Note that if the problem had isolated variables before the cutoff, they
will also be affected.
Args:
poly (:obj:`dimod.BinaryPolynomial`):
Binary polynomial to be sampled from.
**kwargs:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
child = self.child
cutoff = self._cutoff
cutoff_vartype = self._cutoff_vartype
comp = self._comparison
if cutoff_vartype is dimod.SPIN:
original = poly.to_spin(copy=False)
else:
original = poly.to_binary(copy=False)
# remove all of the terms of order >= 2 that have a bias less than cutoff
new = type(poly)(((term, bias) for term, bias in original.items()
if len(term) > 1 and not comp(abs(bias), cutoff)),
cutoff_vartype)
# also include the linear biases for the variables in new
for v in new.variables:
term = v,
if term in original:
new[term] = original[term]
# everything else is isolated
isolated = list(original.variables.difference(new.variables))
if isolated and len(new) == 0:
# in this case all variables are isolated, so we just put one back
# to serve as the basis
term = isolated.pop(),
new[term] = original[term]
# get the samples from the child sampler and put them into the original vartype
sampleset = child.sample_poly(new, **kwargs).change_vartype(poly.vartype, inplace=True)
# we now need to add the isolated back in, in a way that minimizes
# the energy. There are lots of ways to do this but for now we'll just
# do one
if isolated:
samples, variables = _restore_isolated_higherorder(sampleset, poly, isolated)
else:
samples = sampleset.record.sample
variables = sampleset.variables
vectors = sampleset.data_vectors
vectors.pop('energy') # we're going to recalculate the energy anyway
return dimod.SampleSet.from_samples_bqm((samples, variables), poly, **vectors) | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 86a1698f15ccd8b0ece0ed868ee49292d3f67f5b | https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/system/composites/cutoffcomposite.py#L231-L296 | train |
dwavesystems/dwave-system | dwave/embedding/diagnostic.py | diagnose_embedding | def diagnose_embedding(emb, source, target):
"""A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, snode, tnode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains
"""
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 86a1698f15ccd8b0ece0ed868ee49292d3f67f5b | https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/diagnostic.py#L23-L96 | train |
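This row's docstring has no usage example, so here is a minimal sketch of consuming the generator. The import path is taken from the row's `path` field, and the graphs and embedding are made up for illustration (the chain for source node 2 is deliberately placed so that the source edge (1, 2) has no counterpart between chains).

```python
import networkx as nx
# Module path taken from this row's `path` field.
from dwave.embedding.diagnostic import diagnose_embedding

source = nx.path_graph(3)          # source edges: (0, 1), (1, 2)
target = nx.path_graph(5)          # target: a 5-node path graph
emb = {0: [0], 1: [1], 2: [3]}     # chains for 1 and 2 are not adjacent in target

# Each yielded item is (ExceptionClass, arg1, arg2, ...), as described above.
for exc_class, *args in diagnose_embedding(emb, source, target):
    print(exc_class.__name__, args)   # MissingEdgeError [1, 2]
```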
Jaza/flask-restplus-patched | flask_restplus_patched/namespace.py | Namespace.model | def model(self, name=None, model=None, mask=None, **kwargs):
"""
Model registration decorator.
"""
if isinstance(model, (flask_marshmallow.Schema, flask_marshmallow.base_fields.FieldABC)):
if not name:
name = model.__class__.__name__
api_model = Model(name, model, mask=mask)
api_model.__apidoc__ = kwargs
return self.add_model(name, api_model)
return super(Namespace, self).model(name=name, model=model, **kwargs) | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/namespace.py#L62-L72 | train |
Jaza/flask-restplus-patched | flask_restplus_patched/namespace.py | Namespace.parameters | def parameters(self, parameters, locations=None):
"""
Endpoint parameters registration decorator.
"""
def decorator(func):
if locations is None and parameters.many:
_locations = ('json', )
else:
_locations = locations
if _locations is not None:
parameters.context['in'] = _locations
return self.doc(params=parameters)(
self.response(code=HTTPStatus.UNPROCESSABLE_ENTITY)(
self.WEBARGS_PARSER.use_args(parameters, locations=_locations)(
func
)
)
)
return decorator | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/namespace.py#L74-L94 | train |
Jaza/flask-restplus-patched | flask_restplus_patched/namespace.py | Namespace.response | def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
"""
Endpoint response OpenAPI documentation decorator.
It automatically documents HTTPError%(code)d responses with relevant
schemas.
Arguments:
model (flask_marshmallow.Schema) - it can be a class or an instance
of the class, which will be used for OpenAPI documentation
purposes. It can be omitted if ``code`` argument is set to an
error HTTP status code.
code (int) - HTTP status code which is documented.
description (str)
Example:
>>> @namespace.response(BaseTeamSchema(many=True))
... @namespace.response(code=HTTPStatus.FORBIDDEN)
... def get_teams():
... if not user.is_admin:
... abort(HTTPStatus.FORBIDDEN)
... return Team.query.all()
"""
code = HTTPStatus(code)
if code is HTTPStatus.NO_CONTENT:
assert model is None
if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
if code.value not in http_exceptions.default_exceptions:
raise ValueError("`model` parameter is required for code %d" % code)
model = self.model(
name='HTTPError%d' % code,
model=DefaultHTTPErrorSchema(http_code=code)
)
if description is None:
description = code.description
def response_serializer_decorator(func):
"""
This decorator handles responses to serialize the returned value
with a given model.
"""
def dump_wrapper(*args, **kwargs):
# pylint: disable=missing-docstring
response = func(*args, **kwargs)
extra_headers = None
if response is None:
if model is not None:
raise ValueError("Response cannot be None with HTTP status %d" % code)
return flask.Response(status=code)
elif isinstance(response, flask.Response) or model is None:
return response
elif isinstance(response, tuple):
response, _code, extra_headers = unpack(response)
else:
_code = code
if HTTPStatus(_code) is code:
response = model.dump(response).data
return response, _code, extra_headers
return dump_wrapper
def decorator(func_or_class):
if code.value in http_exceptions.default_exceptions:
# If the code is handled by raising an exception, it will
# produce a response later, so we don't need to apply a useless
# wrapper.
decorated_func_or_class = func_or_class
elif isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(response_serializer_decorator)
decorated_func_or_class = func_or_class
else:
decorated_func_or_class = wraps(func_or_class)(
response_serializer_decorator(func_or_class)
)
if model is None:
api_model = None
else:
if isinstance(model, Model):
api_model = model
else:
api_model = self.model(model=model)
if getattr(model, 'many', False):
api_model = [api_model]
doc_decorator = self.doc(
responses={
code.value: (description, api_model)
}
)
return doc_decorator(decorated_func_or_class)
return decorator | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/namespace.py#L96-L192 | train |
Jaza/flask-restplus-patched | flask_restplus_patched/resource.py | Resource._apply_decorator_to_methods | def _apply_decorator_to_methods(cls, decorator):
"""
This helper can apply a given decorator to all methods on the current
Resource.
NOTE: In contrast to ``Resource.method_decorators``, which has a
similar use-case, this method applies decorators directly and override
methods in-place, while the decorators listed in
``Resource.method_decorators`` are applied on every request which is
quite a waste of resources.
"""
for method in cls.methods:
method_name = method.lower()
decorated_method_func = decorator(getattr(cls, method_name))
setattr(cls, method_name, decorated_method_func) | python | (code: identical to original_string; omitted) | (code_tokens: tokenized duplicate of code; omitted) | (docstring: duplicate of the docstring in original_string; omitted) | (docstring_tokens: omitted)
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/resource.py#L16-L30 | train |
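To make the in-place decoration pattern of `_apply_decorator_to_methods` concrete, here is a small self-contained sketch using hypothetical stand-ins (`log_calls`, `FakeResource`); it does not touch Flask-RESTplus itself and only mirrors the loop in the method above.

```python
def log_calls(func):
    """Hypothetical decorator standing in for an access-restriction decorator."""
    def wrapper(*args, **kwargs):
        print("calling", func.__name__)
        return func(*args, **kwargs)
    return wrapper

class FakeResource:
    """Stand-in for a flask_restplus_patched Resource subclass."""
    methods = ['GET', 'POST']

    def get(self):
        return "get-result"

    def post(self):
        return "post-result"

# Same loop as _apply_decorator_to_methods: each HTTP handler is decorated once,
# in place, instead of re-applying decorators on every request.
for method in FakeResource.methods:
    name = method.lower()
    setattr(FakeResource, name, log_calls(getattr(FakeResource, name)))

print(FakeResource().get())   # prints "calling get" then "get-result"
```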
Jaza/flask-restplus-patched | flask_restplus_patched/resource.py | Resource.options | def options(self, *args, **kwargs):
"""
Check which methods are allowed.
Use this method if you need to know what operations are allowed to be
performed on this endpoint, e.g. to decide whether to display a button
in your UI.
The list of allowed methods is provided in `Allow` response header.
"""
# This is a generic implementation of OPTIONS method for resources.
# This method checks every permissions provided as decorators for other
# methods to provide information about what methods `current_user` can
# use.
method_funcs = [getattr(self, m.lower()) for m in self.methods]
allowed_methods = []
request_oauth_backup = getattr(flask.request, 'oauth', None)
for method_func in method_funcs:
if getattr(method_func, '_access_restriction_decorators', None):
if not hasattr(method_func, '_cached_fake_method_func'):
fake_method_func = lambda *args, **kwargs: True
# `__name__` is used in `login_required` decorator, so it
# is required to fake this also
fake_method_func.__name__ = 'options'
# Decorate the fake method with the registered access
# restriction decorators
for decorator in method_func._access_restriction_decorators:
fake_method_func = decorator(fake_method_func)
# Cache the `fake_method_func` to avoid redoing this over
# and over again
method_func.__dict__['_cached_fake_method_func'] = fake_method_func
else:
fake_method_func = method_func._cached_fake_method_func
flask.request.oauth = None
try:
fake_method_func(self, *args, **kwargs)
except HTTPException:
# This method is not allowed, so skip it
continue
allowed_methods.append(method_func.__name__.upper())
flask.request.oauth = request_oauth_backup
return flask.Response(
status=HTTPStatus.NO_CONTENT,
headers={'Allow': ", ".join(allowed_methods)}
) | python | def options(self, *args, **kwargs):
"""
Check which methods are allowed.
Use this method if you need to know what operations are allowed to be
performed on this endpoint, e.g. to decide wether to display a button
in your UI.
The list of allowed methods is provided in `Allow` response header.
"""
# This is a generic implementation of OPTIONS method for resources.
# This method checks every permissions provided as decorators for other
# methods to provide information about what methods `current_user` can
# use.
method_funcs = [getattr(self, m.lower()) for m in self.methods]
allowed_methods = []
request_oauth_backup = getattr(flask.request, 'oauth', None)
for method_func in method_funcs:
if getattr(method_func, '_access_restriction_decorators', None):
if not hasattr(method_func, '_cached_fake_method_func'):
fake_method_func = lambda *args, **kwargs: True
# `__name__` is used in `login_required` decorator, so it
# is required to fake this also
fake_method_func.__name__ = 'options'
# Decorate the fake method with the registered access
# restriction decorators
for decorator in method_func._access_restriction_decorators:
fake_method_func = decorator(fake_method_func)
# Cache the `fake_method_func` to avoid redoing this over
# and over again
method_func.__dict__['_cached_fake_method_func'] = fake_method_func
else:
fake_method_func = method_func._cached_fake_method_func
flask.request.oauth = None
try:
fake_method_func(self, *args, **kwargs)
except HTTPException:
# This method is not allowed, so skip it
continue
allowed_methods.append(method_func.__name__.upper())
flask.request.oauth = request_oauth_backup
return flask.Response(
status=HTTPStatus.NO_CONTENT,
headers={'Allow': ", ".join(allowed_methods)}
) | [
"def",
"options",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# This is a generic implementation of OPTIONS method for resources.",
"# This method checks every permissions provided as decorators for other",
"# methods to provide information about what methods `current_user` can",
"# use.",
"method_funcs",
"=",
"[",
"getattr",
"(",
"self",
",",
"m",
".",
"lower",
"(",
")",
")",
"for",
"m",
"in",
"self",
".",
"methods",
"]",
"allowed_methods",
"=",
"[",
"]",
"request_oauth_backup",
"=",
"getattr",
"(",
"flask",
".",
"request",
",",
"'oauth'",
",",
"None",
")",
"for",
"method_func",
"in",
"method_funcs",
":",
"if",
"getattr",
"(",
"method_func",
",",
"'_access_restriction_decorators'",
",",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"method_func",
",",
"'_cached_fake_method_func'",
")",
":",
"fake_method_func",
"=",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"True",
"# `__name__` is used in `login_required` decorator, so it",
"# is required to fake this also",
"fake_method_func",
".",
"__name__",
"=",
"'options'",
"# Decorate the fake method with the registered access",
"# restriction decorators",
"for",
"decorator",
"in",
"method_func",
".",
"_access_restriction_decorators",
":",
"fake_method_func",
"=",
"decorator",
"(",
"fake_method_func",
")",
"# Cache the `fake_method_func` to avoid redoing this over",
"# and over again",
"method_func",
".",
"__dict__",
"[",
"'_cached_fake_method_func'",
"]",
"=",
"fake_method_func",
"else",
":",
"fake_method_func",
"=",
"method_func",
".",
"_cached_fake_method_func",
"flask",
".",
"request",
".",
"oauth",
"=",
"None",
"try",
":",
"fake_method_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"HTTPException",
":",
"# This method is not allowed, so skip it",
"continue",
"allowed_methods",
".",
"append",
"(",
"method_func",
".",
"__name__",
".",
"upper",
"(",
")",
")",
"flask",
".",
"request",
".",
"oauth",
"=",
"request_oauth_backup",
"return",
"flask",
".",
"Response",
"(",
"status",
"=",
"HTTPStatus",
".",
"NO_CONTENT",
",",
"headers",
"=",
"{",
"'Allow'",
":",
"\", \"",
".",
"join",
"(",
"allowed_methods",
")",
"}",
")"
]
| Check which methods are allowed.
Use this method if you need to know what operations are allowed to be
performed on this endpoint, e.g. to decide whether to display a button
in your UI.
The list of allowed methods is provided in `Allow` response header. | [
"Check",
"which",
"methods",
"are",
"allowed",
"."
]
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/resource.py#L32-L81 | train |
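The OPTIONS handler above probes permissions without running the real endpoint methods. A minimal standalone sketch of that trick follows, assuming nothing from Flask: Forbidden and admin_only are invented stand-ins for werkzeug's HTTPException and a real access-restriction decorator.

class Forbidden(Exception):
    pass

def admin_only(func):
    def wrapper(*args, **kwargs):
        if not kwargs.get("is_admin", False):
            raise Forbidden()
        return func(*args, **kwargs)
    return wrapper

def probe_allowed(decorators, **context):
    """Return True if a no-op callable survives all the decorators."""
    fake = lambda *args, **kwargs: True
    for decorator in decorators:
        fake = decorator(fake)      # wrap the throwaway callable, not the real handler
    try:
        fake(**context)
    except Forbidden:
        return False                # a decorator rejected the call
    return True

print(probe_allowed([admin_only], is_admin=True))   # True
print(probe_allowed([admin_only], is_admin=False))  # False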
Jaza/flask-restplus-patched | flask_restplus_patched/parameters.py | PatchJSONParameters.validate_patch_structure | def validate_patch_structure(self, data):
"""
Common validation of PATCH structure
        Provide a check that 'value' is present in all operations that expect it.
        Provide a check that 'path' is present. 'path' can be absent if provided
        without '/' at the start. It is assumed that if 'path' is present it
        is prepended with '/'.
        The leading '/' is removed to simplify usage in the resource.
"""
if data['op'] not in self.NO_VALUE_OPERATIONS and 'value' not in data:
raise ValidationError('value is required')
if 'path' not in data:
raise ValidationError('Path is required and must always begin with /')
else:
data['field_name'] = data['path'][1:] | python | def validate_patch_structure(self, data):
"""
Common validation of PATCH structure
        Provide a check that 'value' is present in all operations that expect it.
        Provide a check that 'path' is present. 'path' can be absent if provided
        without '/' at the start. It is assumed that if 'path' is present it
        is prepended with '/'.
        The leading '/' is removed to simplify usage in the resource.
"""
if data['op'] not in self.NO_VALUE_OPERATIONS and 'value' not in data:
raise ValidationError('value is required')
if 'path' not in data:
raise ValidationError('Path is required and must always begin with /')
else:
data['field_name'] = data['path'][1:] | [
"def",
"validate_patch_structure",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
"[",
"'op'",
"]",
"not",
"in",
"self",
".",
"NO_VALUE_OPERATIONS",
"and",
"'value'",
"not",
"in",
"data",
":",
"raise",
"ValidationError",
"(",
"'value is required'",
")",
"if",
"'path'",
"not",
"in",
"data",
":",
"raise",
"ValidationError",
"(",
"'Path is required and must always begin with /'",
")",
"else",
":",
"data",
"[",
"'field_name'",
"]",
"=",
"data",
"[",
"'path'",
"]",
"[",
"1",
":",
"]"
]
| Common validation of PATCH structure
Provide a check that 'value' is present in all operations that expect it.
Provide a check that 'path' is present. 'path' can be absent if provided
without '/' at the start. It is assumed that if 'path' is present it
is prepended with '/'.
The leading '/' is removed to simplify usage in the resource. | [
"Common",
"validation",
"of",
"PATCH",
"structure"
]
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/parameters.py#L97-L114 | train |
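A small self-contained sketch of the same PATCH validation, with a plain ValueError standing in for marshmallow's ValidationError and NO_VALUE_OPERATIONS guessed as containing only 'remove' (the real set is defined elsewhere in the class):

NO_VALUE_OPERATIONS = {"remove"}  # assumption for this sketch

def validate_patch_op(data):
    if data["op"] not in NO_VALUE_OPERATIONS and "value" not in data:
        raise ValueError("value is required")
    if "path" not in data or not data["path"].startswith("/"):
        raise ValueError("Path is required and must always begin with /")
    data["field_name"] = data["path"][1:]  # strip the leading '/'
    return data

print(validate_patch_op({"op": "replace", "path": "/title", "value": "New title"}))
# {'op': 'replace', 'path': '/title', 'value': 'New title', 'field_name': 'title'}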
Jaza/flask-restplus-patched | flask_restplus_patched/parameters.py | PatchJSONParameters.perform_patch | def perform_patch(cls, operations, obj, state=None):
"""
Performs all necessary operations by calling class methods with
corresponding names.
"""
if state is None:
state = {}
for operation in operations:
if not cls._process_patch_operation(operation, obj=obj, state=state):
log.info(
"%s patching has been stopped because of unknown operation %s",
obj.__class__.__name__,
operation
)
raise ValidationError(
"Failed to update %s details. Operation %s could not succeed." % (
obj.__class__.__name__,
operation
)
)
return True | python | def perform_patch(cls, operations, obj, state=None):
"""
Performs all necessary operations by calling class methods with
corresponding names.
"""
if state is None:
state = {}
for operation in operations:
if not cls._process_patch_operation(operation, obj=obj, state=state):
log.info(
"%s patching has been stopped because of unknown operation %s",
obj.__class__.__name__,
operation
)
raise ValidationError(
"Failed to update %s details. Operation %s could not succeed." % (
obj.__class__.__name__,
operation
)
)
return True | [
"def",
"perform_patch",
"(",
"cls",
",",
"operations",
",",
"obj",
",",
"state",
"=",
"None",
")",
":",
"if",
"state",
"is",
"None",
":",
"state",
"=",
"{",
"}",
"for",
"operation",
"in",
"operations",
":",
"if",
"not",
"cls",
".",
"_process_patch_operation",
"(",
"operation",
",",
"obj",
"=",
"obj",
",",
"state",
"=",
"state",
")",
":",
"log",
".",
"info",
"(",
"\"%s patching has been stopped because of unknown operation %s\"",
",",
"obj",
".",
"__class__",
".",
"__name__",
",",
"operation",
")",
"raise",
"ValidationError",
"(",
"\"Failed to update %s details. Operation %s could not succeed.\"",
"%",
"(",
"obj",
".",
"__class__",
".",
"__name__",
",",
"operation",
")",
")",
"return",
"True"
]
| Performs all necessary operations by calling class methods with
corresponding names. | [
"Performs",
"all",
"necessary",
"operations",
"by",
"calling",
"class",
"methods",
"with",
"corresponding",
"names",
"."
]
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/parameters.py#L117-L137 | train |
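A rough standalone sketch of the dispatch idea behind perform_patch and the replace handler in the next record: each 'op' name resolves to a class method, and an operation no handler accepts aborts the patch. SimplePatcher and Article are invented for the example; they are not the real PatchJSONParameters.

class SimplePatcher:
    @classmethod
    def apply(cls, operations, obj):
        for operation in operations:
            handler = getattr(cls, operation["op"], None)   # look up the op by name
            if handler is None or not handler(obj, operation):
                raise ValueError("Operation %s could not succeed" % operation)
        return True

    @classmethod
    def replace(cls, obj, operation):
        field = operation["path"].lstrip("/")
        if not hasattr(obj, field):
            return False          # unknown field, reject the operation
        setattr(obj, field, operation["value"])
        return True

class Article:
    title = "old title"

SimplePatcher.apply([{"op": "replace", "path": "/title", "value": "new title"}], Article)
print(Article.title)  # new title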
Jaza/flask-restplus-patched | flask_restplus_patched/parameters.py | PatchJSONParameters.replace | def replace(cls, obj, field, value, state):
"""
        This is the method for the replace operation. It is separated to provide a
possibility to easily override it in your Parameters.
Args:
obj (object): an instance to change.
field (str): field name
value (str): new value
state (dict): inter-operations state storage
Returns:
processing_status (bool): True
"""
if not hasattr(obj, field):
raise ValidationError("Field '%s' does not exist, so it cannot be patched" % field)
setattr(obj, field, value)
return True | python | def replace(cls, obj, field, value, state):
"""
        This is the method for the replace operation. It is separated to provide a
possibility to easily override it in your Parameters.
Args:
obj (object): an instance to change.
field (str): field name
value (str): new value
state (dict): inter-operations state storage
Returns:
processing_status (bool): True
"""
if not hasattr(obj, field):
raise ValidationError("Field '%s' does not exist, so it cannot be patched" % field)
setattr(obj, field, value)
return True | [
"def",
"replace",
"(",
"cls",
",",
"obj",
",",
"field",
",",
"value",
",",
"state",
")",
":",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"field",
")",
":",
"raise",
"ValidationError",
"(",
"\"Field '%s' does not exist, so it cannot be patched\"",
"%",
"field",
")",
"setattr",
"(",
"obj",
",",
"field",
",",
"value",
")",
"return",
"True"
]
| This is the method for the replace operation. It is separated to provide a
possibility to easily override it in your Parameters.
Args:
obj (object): an instance to change.
field (str): field name
value (str): new value
state (dict): inter-operations state storage
Returns:
processing_status (bool): True | [
"This",
"is",
"method",
"for",
"replace",
"operation",
".",
"It",
"is",
"separated",
"to",
"provide",
"a",
"possibility",
"to",
"easily",
"override",
"it",
"in",
"your",
"Parameters",
"."
]
| 38b4a030f28e6aec374d105173aa5e9b6bd51e5e | https://github.com/Jaza/flask-restplus-patched/blob/38b4a030f28e6aec374d105173aa5e9b6bd51e5e/flask_restplus_patched/parameters.py#L173-L190 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/discourse.py | DiscourseEnrich.__related_categories | def __related_categories(self, category_id):
""" Get all related categories to a given one """
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related | python | def __related_categories(self, category_id):
""" Get all related categories to a given one """
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related | [
"def",
"__related_categories",
"(",
"self",
",",
"category_id",
")",
":",
"related",
"=",
"[",
"]",
"for",
"cat",
"in",
"self",
".",
"categories_tree",
":",
"if",
"category_id",
"in",
"self",
".",
"categories_tree",
"[",
"cat",
"]",
":",
"related",
".",
"append",
"(",
"self",
".",
"categories",
"[",
"cat",
"]",
")",
"return",
"related"
]
| Get all related categories to a given one | [
"Get",
"all",
"related",
"categories",
"to",
"a",
"given",
"one"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/discourse.py#L148-L154 | train |
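A self-contained illustration of the lookup above; categories and categories_tree are invented here, but mirror the structures the method reads (parent category id mapped to the ids of its subcategories, and id mapped to name):

categories = {1: "General", 2: "Support", 3: "Howto"}
categories_tree = {1: [2, 3], 2: []}

def related_categories(category_id):
    """Names of every category whose children contain category_id."""
    return [categories[cat] for cat, children in categories_tree.items()
            if category_id in children]

print(related_categories(3))  # ['General']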
chaoss/grimoirelab-elk | grimoire_elk/track_items.py | _create_projects_file | def _create_projects_file(project_name, data_source, items):
""" Create a projects file from the items origin data """
repositories = []
for item in items:
if item['origin'] not in repositories:
repositories.append(item['origin'])
projects = {
project_name: {
data_source: repositories
}
}
projects_file, projects_file_path = tempfile.mkstemp(prefix='track_items_')
with open(projects_file_path, "w") as pfile:
json.dump(projects, pfile, indent=True)
return projects_file_path | python | def _create_projects_file(project_name, data_source, items):
""" Create a projects file from the items origin data """
repositories = []
for item in items:
if item['origin'] not in repositories:
repositories.append(item['origin'])
projects = {
project_name: {
data_source: repositories
}
}
projects_file, projects_file_path = tempfile.mkstemp(prefix='track_items_')
with open(projects_file_path, "w") as pfile:
json.dump(projects, pfile, indent=True)
return projects_file_path | [
"def",
"_create_projects_file",
"(",
"project_name",
",",
"data_source",
",",
"items",
")",
":",
"repositories",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"item",
"[",
"'origin'",
"]",
"not",
"in",
"repositories",
":",
"repositories",
".",
"append",
"(",
"item",
"[",
"'origin'",
"]",
")",
"projects",
"=",
"{",
"project_name",
":",
"{",
"data_source",
":",
"repositories",
"}",
"}",
"projects_file",
",",
"projects_file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'track_items_'",
")",
"with",
"open",
"(",
"projects_file_path",
",",
"\"w\"",
")",
"as",
"pfile",
":",
"json",
".",
"dump",
"(",
"projects",
",",
"pfile",
",",
"indent",
"=",
"True",
")",
"return",
"projects_file_path"
]
| Create a projects file from the items origin data | [
"Create",
"a",
"projects",
"file",
"from",
"the",
"items",
"origin",
"data"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/track_items.py#L194-L212 | train |
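A quick usage-style sketch of the same idea, with invented items that only carry the 'origin' field the helper actually reads; the project name and the 'git' data source label are also just examples:

import json
import tempfile

items = [{"origin": "https://github.com/chaoss/grimoirelab-elk"},
         {"origin": "https://github.com/chaoss/grimoirelab-perceval"},
         {"origin": "https://github.com/chaoss/grimoirelab-elk"}]  # duplicate origin is dropped

repositories = []
for item in items:
    if item["origin"] not in repositories:
        repositories.append(item["origin"])

projects = {"grimoirelab": {"git": repositories}}

_, projects_file_path = tempfile.mkstemp(prefix="track_items_")
with open(projects_file_path, "w") as pfile:
    json.dump(projects, pfile, indent=True)

print(open(projects_file_path).read())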
chaoss/grimoirelab-elk | grimoire_elk/enriched/dockerhub.py | DockerHubEnrich.enrich_items | def enrich_items(self, ocean_backend, events=False):
""" A custom enrich items is needed because apart from the enriched
        events from raw items, an image item with the last data for an image
must be created """
max_items = self.elastic.max_items_bulk
current = 0
total = 0
bulk_json = ""
items = ocean_backend.fetch()
images_items = {}
url = self.elastic.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.elastic.anonymize_url(url), max_items)
for item in items:
if current >= max_items:
total += self.elastic.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("Added %i items to %s (%0.2f MB)", total, self.elastic.anonymize_url(url), json_size)
bulk_json = ""
current = 0
rich_item = self.get_rich_item(item)
data_json = json.dumps(rich_item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(item[self.get_field_unique_id()])
bulk_json += data_json + "\n" # Bulk document
current += 1
if rich_item['id'] not in images_items:
# Let's transform the rich_event in a rich_image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
else:
image_date = images_items[rich_item['id']]['last_updated']
if image_date <= rich_item['last_updated']:
# This event is newer for the image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
if current > 0:
total += self.elastic.safe_put_bulk(url, bulk_json)
if total == 0:
# No items enriched, nothing to upload to ES
return total
# Time to upload the images enriched items. The id is uuid+"_image"
# Normally we are enriching events for a unique image so all images
# data can be upload in one query
for image in images_items:
data = images_items[image]
data_json = json.dumps(data)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(data['id'] + "_image")
bulk_json += data_json + "\n" # Bulk document
total += self.elastic.safe_put_bulk(url, bulk_json)
return total | python | def enrich_items(self, ocean_backend, events=False):
""" A custom enrich items is needed because apart from the enriched
        events from raw items, an image item with the last data for an image
must be created """
max_items = self.elastic.max_items_bulk
current = 0
total = 0
bulk_json = ""
items = ocean_backend.fetch()
images_items = {}
url = self.elastic.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.elastic.anonymize_url(url), max_items)
for item in items:
if current >= max_items:
total += self.elastic.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("Added %i items to %s (%0.2f MB)", total, self.elastic.anonymize_url(url), json_size)
bulk_json = ""
current = 0
rich_item = self.get_rich_item(item)
data_json = json.dumps(rich_item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(item[self.get_field_unique_id()])
bulk_json += data_json + "\n" # Bulk document
current += 1
if rich_item['id'] not in images_items:
# Let's transform the rich_event in a rich_image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
else:
image_date = images_items[rich_item['id']]['last_updated']
if image_date <= rich_item['last_updated']:
# This event is newer for the image
rich_item['is_docker_image'] = 1
rich_item['is_event'] = 0
images_items[rich_item['id']] = rich_item
if current > 0:
total += self.elastic.safe_put_bulk(url, bulk_json)
if total == 0:
# No items enriched, nothing to upload to ES
return total
# Time to upload the images enriched items. The id is uuid+"_image"
# Normally we are enriching events for a unique image so all images
# data can be upload in one query
for image in images_items:
data = images_items[image]
data_json = json.dumps(data)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(data['id'] + "_image")
bulk_json += data_json + "\n" # Bulk document
total += self.elastic.safe_put_bulk(url, bulk_json)
return total | [
"def",
"enrich_items",
"(",
"self",
",",
"ocean_backend",
",",
"events",
"=",
"False",
")",
":",
"max_items",
"=",
"self",
".",
"elastic",
".",
"max_items_bulk",
"current",
"=",
"0",
"total",
"=",
"0",
"bulk_json",
"=",
"\"\"",
"items",
"=",
"ocean_backend",
".",
"fetch",
"(",
")",
"images_items",
"=",
"{",
"}",
"url",
"=",
"self",
".",
"elastic",
".",
"index_url",
"+",
"'/items/_bulk'",
"logger",
".",
"debug",
"(",
"\"Adding items to %s (in %i packs)\"",
",",
"self",
".",
"elastic",
".",
"anonymize_url",
"(",
"url",
")",
",",
"max_items",
")",
"for",
"item",
"in",
"items",
":",
"if",
"current",
">=",
"max_items",
":",
"total",
"+=",
"self",
".",
"elastic",
".",
"safe_put_bulk",
"(",
"url",
",",
"bulk_json",
")",
"json_size",
"=",
"sys",
".",
"getsizeof",
"(",
"bulk_json",
")",
"/",
"(",
"1024",
"*",
"1024",
")",
"logger",
".",
"debug",
"(",
"\"Added %i items to %s (%0.2f MB)\"",
",",
"total",
",",
"self",
".",
"elastic",
".",
"anonymize_url",
"(",
"url",
")",
",",
"json_size",
")",
"bulk_json",
"=",
"\"\"",
"current",
"=",
"0",
"rich_item",
"=",
"self",
".",
"get_rich_item",
"(",
"item",
")",
"data_json",
"=",
"json",
".",
"dumps",
"(",
"rich_item",
")",
"bulk_json",
"+=",
"'{\"index\" : {\"_id\" : \"%s\" } }\\n'",
"%",
"(",
"item",
"[",
"self",
".",
"get_field_unique_id",
"(",
")",
"]",
")",
"bulk_json",
"+=",
"data_json",
"+",
"\"\\n\"",
"# Bulk document",
"current",
"+=",
"1",
"if",
"rich_item",
"[",
"'id'",
"]",
"not",
"in",
"images_items",
":",
"# Let's transform the rich_event in a rich_image",
"rich_item",
"[",
"'is_docker_image'",
"]",
"=",
"1",
"rich_item",
"[",
"'is_event'",
"]",
"=",
"0",
"images_items",
"[",
"rich_item",
"[",
"'id'",
"]",
"]",
"=",
"rich_item",
"else",
":",
"image_date",
"=",
"images_items",
"[",
"rich_item",
"[",
"'id'",
"]",
"]",
"[",
"'last_updated'",
"]",
"if",
"image_date",
"<=",
"rich_item",
"[",
"'last_updated'",
"]",
":",
"# This event is newer for the image",
"rich_item",
"[",
"'is_docker_image'",
"]",
"=",
"1",
"rich_item",
"[",
"'is_event'",
"]",
"=",
"0",
"images_items",
"[",
"rich_item",
"[",
"'id'",
"]",
"]",
"=",
"rich_item",
"if",
"current",
">",
"0",
":",
"total",
"+=",
"self",
".",
"elastic",
".",
"safe_put_bulk",
"(",
"url",
",",
"bulk_json",
")",
"if",
"total",
"==",
"0",
":",
"# No items enriched, nothing to upload to ES",
"return",
"total",
"# Time to upload the images enriched items. The id is uuid+\"_image\"",
"# Normally we are enriching events for a unique image so all images",
"# data can be upload in one query",
"for",
"image",
"in",
"images_items",
":",
"data",
"=",
"images_items",
"[",
"image",
"]",
"data_json",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"bulk_json",
"+=",
"'{\"index\" : {\"_id\" : \"%s\" } }\\n'",
"%",
"(",
"data",
"[",
"'id'",
"]",
"+",
"\"_image\"",
")",
"bulk_json",
"+=",
"data_json",
"+",
"\"\\n\"",
"# Bulk document",
"total",
"+=",
"self",
".",
"elastic",
".",
"safe_put_bulk",
"(",
"url",
",",
"bulk_json",
")",
"return",
"total"
]
| A custom enrich items is needed because apart from the enriched
events from raw items, an image item with the last data for an image
must be created | [
"A",
"custom",
"enrich",
"items",
"is",
"needed",
"because",
"apart",
"from",
"the",
"enriched",
"events",
"from",
"raw",
"items",
"a",
"image",
"item",
"with",
"the",
"last",
"data",
"for",
"an",
"image",
"must",
"be",
"created"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/dockerhub.py#L125-L188 | train |
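The bulk upload above boils down to building a newline-delimited payload of one action line plus one document per item. A minimal sketch with invented items; no Elasticsearch connection is opened here, the resulting string would simply be POSTed to <index_url>/items/_bulk by safe_put_bulk():

import json

rich_items = [{"uuid": "abc", "is_event": 1, "is_docker_image": 0},
              {"uuid": "def", "is_event": 0, "is_docker_image": 1}]

bulk_json = ""
for rich_item in rich_items:
    bulk_json += '{"index" : {"_id" : "%s" } }\n' % rich_item["uuid"]  # action line
    bulk_json += json.dumps(rich_item) + "\n"                          # document line

print(bulk_json)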
chaoss/grimoirelab-elk | utils/gh2k.py | get_owner_repos_url | def get_owner_repos_url(owner, token):
""" The owner could be a org or a user.
It waits if need to have rate limit.
    Also it fixes a django issue changing - with _
"""
url_org = GITHUB_API_URL + "/orgs/" + owner + "/repos"
url_user = GITHUB_API_URL + "/users/" + owner + "/repos"
url_owner = url_org # Use org by default
try:
r = requests.get(url_org,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 403:
rate_limit_reset_ts = datetime.fromtimestamp(int(r.headers['X-RateLimit-Reset']))
seconds_to_reset = (rate_limit_reset_ts - datetime.utcnow()).seconds + 1
logging.info("GitHub rate limit exhausted. Waiting %i secs for rate limit reset." % (seconds_to_reset))
sleep(seconds_to_reset)
else:
# owner is not an org, try with a user
url_owner = url_user
return url_owner | python | def get_owner_repos_url(owner, token):
""" The owner could be a org or a user.
It waits if need to have rate limit.
    Also it fixes a django issue changing - with _
"""
url_org = GITHUB_API_URL + "/orgs/" + owner + "/repos"
url_user = GITHUB_API_URL + "/users/" + owner + "/repos"
url_owner = url_org # Use org by default
try:
r = requests.get(url_org,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 403:
rate_limit_reset_ts = datetime.fromtimestamp(int(r.headers['X-RateLimit-Reset']))
seconds_to_reset = (rate_limit_reset_ts - datetime.utcnow()).seconds + 1
logging.info("GitHub rate limit exhausted. Waiting %i secs for rate limit reset." % (seconds_to_reset))
sleep(seconds_to_reset)
else:
# owner is not an org, try with a user
url_owner = url_user
return url_owner | [
"def",
"get_owner_repos_url",
"(",
"owner",
",",
"token",
")",
":",
"url_org",
"=",
"GITHUB_API_URL",
"+",
"\"/orgs/\"",
"+",
"owner",
"+",
"\"/repos\"",
"url_user",
"=",
"GITHUB_API_URL",
"+",
"\"/users/\"",
"+",
"owner",
"+",
"\"/repos\"",
"url_owner",
"=",
"url_org",
"# Use org by default",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url_org",
",",
"params",
"=",
"get_payload",
"(",
")",
",",
"headers",
"=",
"get_headers",
"(",
"token",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"if",
"r",
".",
"status_code",
"==",
"403",
":",
"rate_limit_reset_ts",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"r",
".",
"headers",
"[",
"'X-RateLimit-Reset'",
"]",
")",
")",
"seconds_to_reset",
"=",
"(",
"rate_limit_reset_ts",
"-",
"datetime",
".",
"utcnow",
"(",
")",
")",
".",
"seconds",
"+",
"1",
"logging",
".",
"info",
"(",
"\"GitHub rate limit exhausted. Waiting %i secs for rate limit reset.\"",
"%",
"(",
"seconds_to_reset",
")",
")",
"sleep",
"(",
"seconds_to_reset",
")",
"else",
":",
"# owner is not an org, try with a user",
"url_owner",
"=",
"url_user",
"return",
"url_owner"
]
| The owner could be an org or a user.
It waits if need to have rate limit.
Also it fixes a django issue changing - with _ | [
"The",
"owner",
"could",
"be",
"a",
"org",
"or",
"a",
"user",
".",
"It",
"waits",
"if",
"need",
"to",
"have",
"rate",
"limit",
".",
"Also",
"it",
"fixes",
"a",
"djando",
"issue",
"changing",
"-",
"with",
"_"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/gh2k.py#L98-L123 | train |
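The 403 branch above reads X-RateLimit-Reset, the epoch second at which the GitHub quota resets, and sleeps until then. A tiny sketch of the same calculation using plain epoch arithmetic and a faked header:

import time

def seconds_until_reset(headers):
    reset_epoch = int(headers["X-RateLimit-Reset"])
    return max(int(reset_epoch - time.time()) + 1, 1)

fake_headers = {"X-RateLimit-Reset": str(int(time.time()) + 5)}
print("would sleep for %i seconds" % seconds_until_reset(fake_headers))
# time.sleep(seconds_until_reset(fake_headers))  # what the real code does on 403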
chaoss/grimoirelab-elk | utils/gh2k.py | get_repositores | def get_repositores(owner_url, token, nrepos):
""" owner could be an org or and user """
all_repos = []
url = owner_url
while True:
logging.debug("Getting repos from: %s" % (url))
try:
r = requests.get(url,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
all_repos += r.json()
logging.debug("Rate limit: %s" % (r.headers['X-RateLimit-Remaining']))
if 'next' not in r.links:
break
url = r.links['next']['url'] # Loving requests :)
except requests.exceptions.ConnectionError:
logging.error("Can not connect to GitHub")
break
# Remove forks
nrepos_recent = [repo for repo in all_repos if not repo['fork']]
# Sort by updated_at and limit to nrepos
nrepos_sorted = sorted(nrepos_recent, key=lambda repo: parser.parse(repo['updated_at']), reverse=True)
nrepos_sorted = nrepos_sorted[0:nrepos]
# First the small repositories to feedback the user quickly
nrepos_sorted = sorted(nrepos_sorted, key=lambda repo: repo['size'])
for repo in nrepos_sorted:
logging.debug("%s %i %s" % (repo['updated_at'], repo['size'], repo['name']))
return nrepos_sorted | python | def get_repositores(owner_url, token, nrepos):
""" owner could be an org or and user """
all_repos = []
url = owner_url
while True:
logging.debug("Getting repos from: %s" % (url))
try:
r = requests.get(url,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
all_repos += r.json()
logging.debug("Rate limit: %s" % (r.headers['X-RateLimit-Remaining']))
if 'next' not in r.links:
break
url = r.links['next']['url'] # Loving requests :)
except requests.exceptions.ConnectionError:
logging.error("Can not connect to GitHub")
break
# Remove forks
nrepos_recent = [repo for repo in all_repos if not repo['fork']]
# Sort by updated_at and limit to nrepos
nrepos_sorted = sorted(nrepos_recent, key=lambda repo: parser.parse(repo['updated_at']), reverse=True)
nrepos_sorted = nrepos_sorted[0:nrepos]
# First the small repositories to feedback the user quickly
nrepos_sorted = sorted(nrepos_sorted, key=lambda repo: repo['size'])
for repo in nrepos_sorted:
logging.debug("%s %i %s" % (repo['updated_at'], repo['size'], repo['name']))
return nrepos_sorted | [
"def",
"get_repositores",
"(",
"owner_url",
",",
"token",
",",
"nrepos",
")",
":",
"all_repos",
"=",
"[",
"]",
"url",
"=",
"owner_url",
"while",
"True",
":",
"logging",
".",
"debug",
"(",
"\"Getting repos from: %s\"",
"%",
"(",
"url",
")",
")",
"try",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"get_payload",
"(",
")",
",",
"headers",
"=",
"get_headers",
"(",
"token",
")",
")",
"r",
".",
"raise_for_status",
"(",
")",
"all_repos",
"+=",
"r",
".",
"json",
"(",
")",
"logging",
".",
"debug",
"(",
"\"Rate limit: %s\"",
"%",
"(",
"r",
".",
"headers",
"[",
"'X-RateLimit-Remaining'",
"]",
")",
")",
"if",
"'next'",
"not",
"in",
"r",
".",
"links",
":",
"break",
"url",
"=",
"r",
".",
"links",
"[",
"'next'",
"]",
"[",
"'url'",
"]",
"# Loving requests :)",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
":",
"logging",
".",
"error",
"(",
"\"Can not connect to GitHub\"",
")",
"break",
"# Remove forks",
"nrepos_recent",
"=",
"[",
"repo",
"for",
"repo",
"in",
"all_repos",
"if",
"not",
"repo",
"[",
"'fork'",
"]",
"]",
"# Sort by updated_at and limit to nrepos",
"nrepos_sorted",
"=",
"sorted",
"(",
"nrepos_recent",
",",
"key",
"=",
"lambda",
"repo",
":",
"parser",
".",
"parse",
"(",
"repo",
"[",
"'updated_at'",
"]",
")",
",",
"reverse",
"=",
"True",
")",
"nrepos_sorted",
"=",
"nrepos_sorted",
"[",
"0",
":",
"nrepos",
"]",
"# First the small repositories to feedback the user quickly",
"nrepos_sorted",
"=",
"sorted",
"(",
"nrepos_sorted",
",",
"key",
"=",
"lambda",
"repo",
":",
"repo",
"[",
"'size'",
"]",
")",
"for",
"repo",
"in",
"nrepos_sorted",
":",
"logging",
".",
"debug",
"(",
"\"%s %i %s\"",
"%",
"(",
"repo",
"[",
"'updated_at'",
"]",
",",
"repo",
"[",
"'size'",
"]",
",",
"repo",
"[",
"'name'",
"]",
")",
")",
"return",
"nrepos_sorted"
]
| owner could be an org or a user | [
"owner",
"could",
"be",
"an",
"org",
"or",
"and",
"user"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/gh2k.py#L126-L161 | train |
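The pagination above relies on requests parsing the Link header into r.links. A hedged, generic version of that loop; running it needs network access (and a token for anything rate-limited), the function itself only shows the pattern:

import requests

def fetch_all_pages(url, headers=None):
    results = []
    while True:
        r = requests.get(url, headers=headers or {})
        r.raise_for_status()
        results += r.json()
        if "next" not in r.links:
            break                            # last page reached
        url = r.links["next"]["url"]         # follow the parsed Link header
    return results

# Example (subject to GitHub's unauthenticated rate limit):
# repos = fetch_all_pages("https://api.github.com/users/chaoss/repos")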
chaoss/grimoirelab-elk | utils/gh2k.py | publish_twitter | def publish_twitter(twitter_contact, owner):
""" Publish in twitter the dashboard """
dashboard_url = CAULDRON_DASH_URL + "/%s" % (owner)
tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon" \
% (twitter_contact, owner, dashboard_url)
status = quote_plus(tweet)
oauth = get_oauth()
r = requests.post(url="https://api.twitter.com/1.1/statuses/update.json?status=" + status, auth=oauth) | python | def publish_twitter(twitter_contact, owner):
""" Publish in twitter the dashboard """
dashboard_url = CAULDRON_DASH_URL + "/%s" % (owner)
tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon" \
% (twitter_contact, owner, dashboard_url)
status = quote_plus(tweet)
oauth = get_oauth()
r = requests.post(url="https://api.twitter.com/1.1/statuses/update.json?status=" + status, auth=oauth) | [
"def",
"publish_twitter",
"(",
"twitter_contact",
",",
"owner",
")",
":",
"dashboard_url",
"=",
"CAULDRON_DASH_URL",
"+",
"\"/%s\"",
"%",
"(",
"owner",
")",
"tweet",
"=",
"\"@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon\"",
"%",
"(",
"twitter_contact",
",",
"owner",
",",
"dashboard_url",
")",
"status",
"=",
"quote_plus",
"(",
"tweet",
")",
"oauth",
"=",
"get_oauth",
"(",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"\"https://api.twitter.com/1.1/statuses/update.json?status=\"",
"+",
"status",
",",
"auth",
"=",
"oauth",
")"
]
| Publish in twitter the dashboard | [
"Publish",
"in",
"twitter",
"the",
"dashboard"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/gh2k.py#L253-L260 | train |
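get_oauth() is defined elsewhere in the script, so this is only a guess at what it returns: an OAuth1 auth object such as the one provided by requests_oauthlib. All credentials below are placeholders and the POST is left commented out.

from urllib.parse import quote_plus

import requests
from requests_oauthlib import OAuth1  # assumption: user-context OAuth1 auth

oauth = OAuth1("API_KEY", "API_SECRET", "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
tweet = "@someone your http://cauldron.io dashboard for #someorg is ready"
url = "https://api.twitter.com/1.1/statuses/update.json?status=" + quote_plus(tweet)
# r = requests.post(url=url, auth=oauth)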
chaoss/grimoirelab-elk | grimoire_elk/raw/mediawiki.py | MediaWikiOcean.get_perceval_params_from_url | def get_perceval_params_from_url(cls, urls):
""" Get the perceval params given the URLs for the data source """
params = []
dparam = cls.get_arthur_params_from_url(urls)
params.append(dparam["url"])
return params | python | def get_perceval_params_from_url(cls, urls):
""" Get the perceval params given the URLs for the data source """
params = []
dparam = cls.get_arthur_params_from_url(urls)
params.append(dparam["url"])
return params | [
"def",
"get_perceval_params_from_url",
"(",
"cls",
",",
"urls",
")",
":",
"params",
"=",
"[",
"]",
"dparam",
"=",
"cls",
".",
"get_arthur_params_from_url",
"(",
"urls",
")",
"params",
".",
"append",
"(",
"dparam",
"[",
"\"url\"",
"]",
")",
"return",
"params"
]
| Get the perceval params given the URLs for the data source | [
"Get",
"the",
"perceval",
"params",
"given",
"the",
"URLs",
"for",
"the",
"data",
"source"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/mediawiki.py#L66-L73 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/sortinghat_gelk.py | SortingHat.add_identity | def add_identity(cls, db, identity, backend):
""" Load and identity list from backend in Sorting Hat """
uuid = None
try:
uuid = api.add_identity(db, backend, identity['email'],
identity['name'], identity['username'])
logger.debug("New sortinghat identity %s %s,%s,%s ",
uuid, identity['username'], identity['name'], identity['email'])
profile = {"name": identity['name'] if identity['name'] else identity['username'],
"email": identity['email']}
api.edit_profile(db, uuid, **profile)
except AlreadyExistsError as ex:
uuid = ex.eid
except InvalidValueError as ex:
logger.warning("Trying to add a None identity. Ignoring it.")
except UnicodeEncodeError as ex:
logger.warning("UnicodeEncodeError. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'])
except Exception as ex:
logger.warning("Unknown exception adding identity. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'], exc_info=True)
if 'company' in identity and identity['company'] is not None:
try:
api.add_organization(db, identity['company'])
api.add_enrollment(db, uuid, identity['company'],
datetime(1900, 1, 1),
datetime(2100, 1, 1))
except AlreadyExistsError:
pass
return uuid | python | def add_identity(cls, db, identity, backend):
""" Load and identity list from backend in Sorting Hat """
uuid = None
try:
uuid = api.add_identity(db, backend, identity['email'],
identity['name'], identity['username'])
logger.debug("New sortinghat identity %s %s,%s,%s ",
uuid, identity['username'], identity['name'], identity['email'])
profile = {"name": identity['name'] if identity['name'] else identity['username'],
"email": identity['email']}
api.edit_profile(db, uuid, **profile)
except AlreadyExistsError as ex:
uuid = ex.eid
except InvalidValueError as ex:
logger.warning("Trying to add a None identity. Ignoring it.")
except UnicodeEncodeError as ex:
logger.warning("UnicodeEncodeError. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'])
except Exception as ex:
logger.warning("Unknown exception adding identity. Ignoring it. %s %s %s",
identity['email'], identity['name'],
identity['username'], exc_info=True)
if 'company' in identity and identity['company'] is not None:
try:
api.add_organization(db, identity['company'])
api.add_enrollment(db, uuid, identity['company'],
datetime(1900, 1, 1),
datetime(2100, 1, 1))
except AlreadyExistsError:
pass
return uuid | [
"def",
"add_identity",
"(",
"cls",
",",
"db",
",",
"identity",
",",
"backend",
")",
":",
"uuid",
"=",
"None",
"try",
":",
"uuid",
"=",
"api",
".",
"add_identity",
"(",
"db",
",",
"backend",
",",
"identity",
"[",
"'email'",
"]",
",",
"identity",
"[",
"'name'",
"]",
",",
"identity",
"[",
"'username'",
"]",
")",
"logger",
".",
"debug",
"(",
"\"New sortinghat identity %s %s,%s,%s \"",
",",
"uuid",
",",
"identity",
"[",
"'username'",
"]",
",",
"identity",
"[",
"'name'",
"]",
",",
"identity",
"[",
"'email'",
"]",
")",
"profile",
"=",
"{",
"\"name\"",
":",
"identity",
"[",
"'name'",
"]",
"if",
"identity",
"[",
"'name'",
"]",
"else",
"identity",
"[",
"'username'",
"]",
",",
"\"email\"",
":",
"identity",
"[",
"'email'",
"]",
"}",
"api",
".",
"edit_profile",
"(",
"db",
",",
"uuid",
",",
"*",
"*",
"profile",
")",
"except",
"AlreadyExistsError",
"as",
"ex",
":",
"uuid",
"=",
"ex",
".",
"eid",
"except",
"InvalidValueError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"Trying to add a None identity. Ignoring it.\"",
")",
"except",
"UnicodeEncodeError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"UnicodeEncodeError. Ignoring it. %s %s %s\"",
",",
"identity",
"[",
"'email'",
"]",
",",
"identity",
"[",
"'name'",
"]",
",",
"identity",
"[",
"'username'",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"Unknown exception adding identity. Ignoring it. %s %s %s\"",
",",
"identity",
"[",
"'email'",
"]",
",",
"identity",
"[",
"'name'",
"]",
",",
"identity",
"[",
"'username'",
"]",
",",
"exc_info",
"=",
"True",
")",
"if",
"'company'",
"in",
"identity",
"and",
"identity",
"[",
"'company'",
"]",
"is",
"not",
"None",
":",
"try",
":",
"api",
".",
"add_organization",
"(",
"db",
",",
"identity",
"[",
"'company'",
"]",
")",
"api",
".",
"add_enrollment",
"(",
"db",
",",
"uuid",
",",
"identity",
"[",
"'company'",
"]",
",",
"datetime",
"(",
"1900",
",",
"1",
",",
"1",
")",
",",
"datetime",
"(",
"2100",
",",
"1",
",",
"1",
")",
")",
"except",
"AlreadyExistsError",
":",
"pass",
"return",
"uuid"
]
| Load an identity list from backend in Sorting Hat | [
"Load",
"and",
"identity",
"list",
"from",
"backend",
"in",
"Sorting",
"Hat"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/sortinghat_gelk.py#L64-L102 | train |
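A hypothetical call to the class method above, assuming grimoire-elk and its SortingHat dependency are installed and configured; sh_db stands for the open SortingHat database handle created elsewhere, and every value in the identity dict is made up:

from grimoire_elk.enriched.sortinghat_gelk import SortingHat

identity = {
    "email": "jdoe@example.com",
    "name": "Jane Doe",
    "username": "jdoe",
    "company": "Example Inc.",  # optional; triggers add_organization/add_enrollment
}

# uuid = SortingHat.add_identity(sh_db, identity, "git")
# print(uuid)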
chaoss/grimoirelab-elk | grimoire_elk/enriched/sortinghat_gelk.py | SortingHat.add_identities | def add_identities(cls, db, identities, backend):
""" Load identities list from backend in Sorting Hat """
logger.info("Adding the identities to SortingHat")
total = 0
for identity in identities:
try:
cls.add_identity(db, identity, backend)
total += 1
except Exception as e:
logger.error("Unexcepted error when adding identities: %s" % e)
continue
logger.info("Total identities added to SH: %i", total) | python | def add_identities(cls, db, identities, backend):
""" Load identities list from backend in Sorting Hat """
logger.info("Adding the identities to SortingHat")
total = 0
for identity in identities:
try:
cls.add_identity(db, identity, backend)
total += 1
except Exception as e:
logger.error("Unexcepted error when adding identities: %s" % e)
continue
logger.info("Total identities added to SH: %i", total) | [
"def",
"add_identities",
"(",
"cls",
",",
"db",
",",
"identities",
",",
"backend",
")",
":",
"logger",
".",
"info",
"(",
"\"Adding the identities to SortingHat\"",
")",
"total",
"=",
"0",
"for",
"identity",
"in",
"identities",
":",
"try",
":",
"cls",
".",
"add_identity",
"(",
"db",
",",
"identity",
",",
"backend",
")",
"total",
"+=",
"1",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Unexcepted error when adding identities: %s\"",
"%",
"e",
")",
"continue",
"logger",
".",
"info",
"(",
"\"Total identities added to SH: %i\"",
",",
"total",
")"
]
| Load identities list from backend in Sorting Hat | [
"Load",
"identities",
"list",
"from",
"backend",
"in",
"Sorting",
"Hat"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/sortinghat_gelk.py#L105-L120 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/sortinghat_gelk.py | SortingHat.remove_identity | def remove_identity(cls, sh_db, ident_id):
"""Delete an identity from SortingHat.
:param sh_db: SortingHat database
:param ident_id: identity identifier
"""
success = False
try:
api.delete_identity(sh_db, ident_id)
logger.debug("Identity %s deleted", ident_id)
success = True
except Exception as e:
logger.debug("Identity not deleted due to %s", str(e))
return success | python | def remove_identity(cls, sh_db, ident_id):
"""Delete an identity from SortingHat.
:param sh_db: SortingHat database
:param ident_id: identity identifier
"""
success = False
try:
api.delete_identity(sh_db, ident_id)
logger.debug("Identity %s deleted", ident_id)
success = True
except Exception as e:
logger.debug("Identity not deleted due to %s", str(e))
return success | [
"def",
"remove_identity",
"(",
"cls",
",",
"sh_db",
",",
"ident_id",
")",
":",
"success",
"=",
"False",
"try",
":",
"api",
".",
"delete_identity",
"(",
"sh_db",
",",
"ident_id",
")",
"logger",
".",
"debug",
"(",
"\"Identity %s deleted\"",
",",
"ident_id",
")",
"success",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Identity not deleted due to %s\"",
",",
"str",
"(",
"e",
")",
")",
"return",
"success"
]
| Delete an identity from SortingHat.
:param sh_db: SortingHat database
:param ident_id: identity identifier | [
"Delete",
"an",
"identity",
"from",
"SortingHat",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/sortinghat_gelk.py#L123-L137 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/sortinghat_gelk.py | SortingHat.remove_unique_identity | def remove_unique_identity(cls, sh_db, uuid):
"""Delete a unique identity from SortingHat.
:param sh_db: SortingHat database
:param uuid: Unique identity identifier
"""
success = False
try:
api.delete_unique_identity(sh_db, uuid)
logger.debug("Unique identity %s deleted", uuid)
success = True
except Exception as e:
logger.debug("Unique identity not deleted due to %s", str(e))
return success | python | def remove_unique_identity(cls, sh_db, uuid):
"""Delete a unique identity from SortingHat.
:param sh_db: SortingHat database
:param uuid: Unique identity identifier
"""
success = False
try:
api.delete_unique_identity(sh_db, uuid)
logger.debug("Unique identity %s deleted", uuid)
success = True
except Exception as e:
logger.debug("Unique identity not deleted due to %s", str(e))
return success | [
"def",
"remove_unique_identity",
"(",
"cls",
",",
"sh_db",
",",
"uuid",
")",
":",
"success",
"=",
"False",
"try",
":",
"api",
".",
"delete_unique_identity",
"(",
"sh_db",
",",
"uuid",
")",
"logger",
".",
"debug",
"(",
"\"Unique identity %s deleted\"",
",",
"uuid",
")",
"success",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Unique identity not deleted due to %s\"",
",",
"str",
"(",
"e",
")",
")",
"return",
"success"
]
| Delete a unique identity from SortingHat.
:param sh_db: SortingHat database
:param uuid: Unique identity identifier | [
"Delete",
"a",
"unique",
"identity",
"from",
"SortingHat",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/sortinghat_gelk.py#L140-L154 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/sortinghat_gelk.py | SortingHat.unique_identities | def unique_identities(cls, sh_db):
"""List the unique identities available in SortingHat.
:param sh_db: SortingHat database
"""
try:
for unique_identity in api.unique_identities(sh_db):
yield unique_identity
except Exception as e:
logger.debug("Unique identities not returned from SortingHat due to %s", str(e)) | python | def unique_identities(cls, sh_db):
"""List the unique identities available in SortingHat.
:param sh_db: SortingHat database
"""
try:
for unique_identity in api.unique_identities(sh_db):
yield unique_identity
except Exception as e:
logger.debug("Unique identities not returned from SortingHat due to %s", str(e)) | [
"def",
"unique_identities",
"(",
"cls",
",",
"sh_db",
")",
":",
"try",
":",
"for",
"unique_identity",
"in",
"api",
".",
"unique_identities",
"(",
"sh_db",
")",
":",
"yield",
"unique_identity",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Unique identities not returned from SortingHat due to %s\"",
",",
"str",
"(",
"e",
")",
")"
]
| List the unique identities available in SortingHat.
:param sh_db: SortingHat database | [
"List",
"the",
"unique",
"identities",
"available",
"in",
"SortingHat",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/sortinghat_gelk.py#L157-L166 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/puppetforge.py | PuppetForgeEnrich.get_rich_events | def get_rich_events(self, item):
"""
Get the enriched events related to a module
"""
module = item['data']
if not item['data']['releases']:
return []
for release in item['data']['releases']:
event = self.get_rich_item(item)
# Update specific fields for this release
event["uuid"] += "_" + release['slug']
event["author_url"] = 'https://forge.puppet.com/' + release['module']['owner']['username']
event["gravatar_id"] = release['module']['owner']['gravatar_id']
event["downloads"] = release['downloads']
event["slug"] = release['slug']
event["version"] = release['version']
event["uri"] = release['uri']
event["validation_score"] = release['validation_score']
event["homepage_url"] = None
if 'project_page' in release['metadata']:
event["homepage_url"] = release['metadata']['project_page']
event["issues_url"] = None
if "issues_url" in release['metadata']:
event["issues_url"] = release['metadata']['issues_url']
event["tags"] = release['tags']
event["license"] = release['metadata']['license']
event["source_url"] = release['metadata']['source']
event["summary"] = release['metadata']['summary']
event["metadata__updated_on"] = parser.parse(release['updated_at']).isoformat()
if self.sortinghat:
release["metadata__updated_on"] = event["metadata__updated_on"] # Needed in get_item_sh logic
event.update(self.get_item_sh(release))
if self.prjs_map:
event.update(self.get_item_project(event))
event.update(self.get_grimoire_fields(release["created_at"], "release"))
yield event | python | def get_rich_events(self, item):
"""
Get the enriched events related to a module
"""
module = item['data']
if not item['data']['releases']:
return []
for release in item['data']['releases']:
event = self.get_rich_item(item)
# Update specific fields for this release
event["uuid"] += "_" + release['slug']
event["author_url"] = 'https://forge.puppet.com/' + release['module']['owner']['username']
event["gravatar_id"] = release['module']['owner']['gravatar_id']
event["downloads"] = release['downloads']
event["slug"] = release['slug']
event["version"] = release['version']
event["uri"] = release['uri']
event["validation_score"] = release['validation_score']
event["homepage_url"] = None
if 'project_page' in release['metadata']:
event["homepage_url"] = release['metadata']['project_page']
event["issues_url"] = None
if "issues_url" in release['metadata']:
event["issues_url"] = release['metadata']['issues_url']
event["tags"] = release['tags']
event["license"] = release['metadata']['license']
event["source_url"] = release['metadata']['source']
event["summary"] = release['metadata']['summary']
event["metadata__updated_on"] = parser.parse(release['updated_at']).isoformat()
if self.sortinghat:
release["metadata__updated_on"] = event["metadata__updated_on"] # Needed in get_item_sh logic
event.update(self.get_item_sh(release))
if self.prjs_map:
event.update(self.get_item_project(event))
event.update(self.get_grimoire_fields(release["created_at"], "release"))
yield event | [
"def",
"get_rich_events",
"(",
"self",
",",
"item",
")",
":",
"module",
"=",
"item",
"[",
"'data'",
"]",
"if",
"not",
"item",
"[",
"'data'",
"]",
"[",
"'releases'",
"]",
":",
"return",
"[",
"]",
"for",
"release",
"in",
"item",
"[",
"'data'",
"]",
"[",
"'releases'",
"]",
":",
"event",
"=",
"self",
".",
"get_rich_item",
"(",
"item",
")",
"# Update specific fields for this release",
"event",
"[",
"\"uuid\"",
"]",
"+=",
"\"_\"",
"+",
"release",
"[",
"'slug'",
"]",
"event",
"[",
"\"author_url\"",
"]",
"=",
"'https://forge.puppet.com/'",
"+",
"release",
"[",
"'module'",
"]",
"[",
"'owner'",
"]",
"[",
"'username'",
"]",
"event",
"[",
"\"gravatar_id\"",
"]",
"=",
"release",
"[",
"'module'",
"]",
"[",
"'owner'",
"]",
"[",
"'gravatar_id'",
"]",
"event",
"[",
"\"downloads\"",
"]",
"=",
"release",
"[",
"'downloads'",
"]",
"event",
"[",
"\"slug\"",
"]",
"=",
"release",
"[",
"'slug'",
"]",
"event",
"[",
"\"version\"",
"]",
"=",
"release",
"[",
"'version'",
"]",
"event",
"[",
"\"uri\"",
"]",
"=",
"release",
"[",
"'uri'",
"]",
"event",
"[",
"\"validation_score\"",
"]",
"=",
"release",
"[",
"'validation_score'",
"]",
"event",
"[",
"\"homepage_url\"",
"]",
"=",
"None",
"if",
"'project_page'",
"in",
"release",
"[",
"'metadata'",
"]",
":",
"event",
"[",
"\"homepage_url\"",
"]",
"=",
"release",
"[",
"'metadata'",
"]",
"[",
"'project_page'",
"]",
"event",
"[",
"\"issues_url\"",
"]",
"=",
"None",
"if",
"\"issues_url\"",
"in",
"release",
"[",
"'metadata'",
"]",
":",
"event",
"[",
"\"issues_url\"",
"]",
"=",
"release",
"[",
"'metadata'",
"]",
"[",
"'issues_url'",
"]",
"event",
"[",
"\"tags\"",
"]",
"=",
"release",
"[",
"'tags'",
"]",
"event",
"[",
"\"license\"",
"]",
"=",
"release",
"[",
"'metadata'",
"]",
"[",
"'license'",
"]",
"event",
"[",
"\"source_url\"",
"]",
"=",
"release",
"[",
"'metadata'",
"]",
"[",
"'source'",
"]",
"event",
"[",
"\"summary\"",
"]",
"=",
"release",
"[",
"'metadata'",
"]",
"[",
"'summary'",
"]",
"event",
"[",
"\"metadata__updated_on\"",
"]",
"=",
"parser",
".",
"parse",
"(",
"release",
"[",
"'updated_at'",
"]",
")",
".",
"isoformat",
"(",
")",
"if",
"self",
".",
"sortinghat",
":",
"release",
"[",
"\"metadata__updated_on\"",
"]",
"=",
"event",
"[",
"\"metadata__updated_on\"",
"]",
"# Needed in get_item_sh logic",
"event",
".",
"update",
"(",
"self",
".",
"get_item_sh",
"(",
"release",
")",
")",
"if",
"self",
".",
"prjs_map",
":",
"event",
".",
"update",
"(",
"self",
".",
"get_item_project",
"(",
"event",
")",
")",
"event",
".",
"update",
"(",
"self",
".",
"get_grimoire_fields",
"(",
"release",
"[",
"\"created_at\"",
"]",
",",
"\"release\"",
")",
")",
"yield",
"event"
]
| Get the enriched events related to a module | [
"Get",
"the",
"enriched",
"events",
"related",
"to",
"a",
"module"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/puppetforge.py#L135-L176 | train |
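A toy, self-contained version of the fan-out above: one enriched module item becomes one event per release, with the uuid suffixed by the release slug. Field names are trimmed down for the sketch.

base_item = {"uuid": "module-1", "module": "stdlib"}
releases = [{"slug": "stdlib-5.0.0", "downloads": 10},
            {"slug": "stdlib-5.1.0", "downloads": 7}]

def rich_events(base, releases):
    for release in releases:
        event = dict(base)                      # copy the enriched module item
        event["uuid"] += "_" + release["slug"]  # one unique id per release
        event["downloads"] = release["downloads"]
        yield event

for event in rich_events(base_item, releases):
    print(event["uuid"], event["downloads"])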
chaoss/grimoirelab-elk | grimoire_elk/enriched/database.py | Database._connect | def _connect(self):
"""Connect to the MySQL database.
"""
try:
db = pymysql.connect(user=self.user, passwd=self.passwd,
host=self.host, port=self.port,
db=self.shdb, use_unicode=True)
return db, db.cursor()
except Exception:
logger.error("Database connection error")
raise | python | def _connect(self):
"""Connect to the MySQL database.
"""
try:
db = pymysql.connect(user=self.user, passwd=self.passwd,
host=self.host, port=self.port,
db=self.shdb, use_unicode=True)
return db, db.cursor()
except Exception:
logger.error("Database connection error")
raise | [
"def",
"_connect",
"(",
"self",
")",
":",
"try",
":",
"db",
"=",
"pymysql",
".",
"connect",
"(",
"user",
"=",
"self",
".",
"user",
",",
"passwd",
"=",
"self",
".",
"passwd",
",",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"db",
"=",
"self",
".",
"shdb",
",",
"use_unicode",
"=",
"True",
")",
"return",
"db",
",",
"db",
".",
"cursor",
"(",
")",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"\"Database connection error\"",
")",
"raise"
]
| Connect to the MySQL database. | [
"Connect",
"to",
"the",
"MySQL",
"database",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/database.py#L44-L55 | train |
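A hedged usage sketch of the same pymysql call; every connection parameter below is a placeholder, and the commented lines only work against a reachable SortingHat MySQL database:

import pymysql

def open_shdb(user, passwd, host, port, shdb):
    db = pymysql.connect(user=user, passwd=passwd, host=host, port=port,
                         db=shdb, use_unicode=True)
    return db, db.cursor()

# db, cursor = open_shdb("root", "secret", "127.0.0.1", 3306, "shdb")
# cursor.execute("SELECT COUNT(*) FROM identities")
# print(cursor.fetchone())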
chaoss/grimoirelab-elk | grimoire_elk/elk.py | refresh_identities | def refresh_identities(enrich_backend, author_field=None, author_values=None):
"""Refresh identities in enriched index.
Retrieve items from the enriched index corresponding to enrich_backend,
and update their identities information, with fresh data from the
SortingHat database.
Instead of the whole index, only items matching the filter_author
    filter are refreshed, if that parameter is not None.
:param enrich_backend: enriched backend to update
:param author_field: field to match items authored by a user
:param author_values: values of the authored field to match items
"""
def update_items(new_filter_author):
for eitem in enrich_backend.fetch(new_filter_author):
roles = None
try:
roles = enrich_backend.roles
except AttributeError:
pass
new_identities = enrich_backend.get_item_sh_from_id(eitem, roles)
eitem.update(new_identities)
yield eitem
logger.debug("Refreshing identities fields from %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
total = 0
max_ids = enrich_backend.elastic.max_items_clause
logger.debug('Refreshing identities')
if author_field is None:
# No filter, update all items
for item in update_items(None):
yield item
total += 1
else:
to_refresh = []
for author_value in author_values:
to_refresh.append(author_value)
if len(to_refresh) > max_ids:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
to_refresh = []
if len(to_refresh) > 0:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
logger.info("Total eitems refreshed for identities fields %i", total) | python | def refresh_identities(enrich_backend, author_field=None, author_values=None):
"""Refresh identities in enriched index.
Retrieve items from the enriched index corresponding to enrich_backend,
and update their identities information, with fresh data from the
SortingHat database.
Instead of the whole index, only items matching the filter_author
    filter are refreshed, if that parameter is not None.
:param enrich_backend: enriched backend to update
:param author_field: field to match items authored by a user
:param author_values: values of the authored field to match items
"""
def update_items(new_filter_author):
for eitem in enrich_backend.fetch(new_filter_author):
roles = None
try:
roles = enrich_backend.roles
except AttributeError:
pass
new_identities = enrich_backend.get_item_sh_from_id(eitem, roles)
eitem.update(new_identities)
yield eitem
logger.debug("Refreshing identities fields from %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
total = 0
max_ids = enrich_backend.elastic.max_items_clause
logger.debug('Refreshing identities')
if author_field is None:
# No filter, update all items
for item in update_items(None):
yield item
total += 1
else:
to_refresh = []
for author_value in author_values:
to_refresh.append(author_value)
if len(to_refresh) > max_ids:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
to_refresh = []
if len(to_refresh) > 0:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
logger.info("Total eitems refreshed for identities fields %i", total) | [
"def",
"refresh_identities",
"(",
"enrich_backend",
",",
"author_field",
"=",
"None",
",",
"author_values",
"=",
"None",
")",
":",
"def",
"update_items",
"(",
"new_filter_author",
")",
":",
"for",
"eitem",
"in",
"enrich_backend",
".",
"fetch",
"(",
"new_filter_author",
")",
":",
"roles",
"=",
"None",
"try",
":",
"roles",
"=",
"enrich_backend",
".",
"roles",
"except",
"AttributeError",
":",
"pass",
"new_identities",
"=",
"enrich_backend",
".",
"get_item_sh_from_id",
"(",
"eitem",
",",
"roles",
")",
"eitem",
".",
"update",
"(",
"new_identities",
")",
"yield",
"eitem",
"logger",
".",
"debug",
"(",
"\"Refreshing identities fields from %s\"",
",",
"enrich_backend",
".",
"elastic",
".",
"anonymize_url",
"(",
"enrich_backend",
".",
"elastic",
".",
"index_url",
")",
")",
"total",
"=",
"0",
"max_ids",
"=",
"enrich_backend",
".",
"elastic",
".",
"max_items_clause",
"logger",
".",
"debug",
"(",
"'Refreshing identities'",
")",
"if",
"author_field",
"is",
"None",
":",
"# No filter, update all items",
"for",
"item",
"in",
"update_items",
"(",
"None",
")",
":",
"yield",
"item",
"total",
"+=",
"1",
"else",
":",
"to_refresh",
"=",
"[",
"]",
"for",
"author_value",
"in",
"author_values",
":",
"to_refresh",
".",
"append",
"(",
"author_value",
")",
"if",
"len",
"(",
"to_refresh",
")",
">",
"max_ids",
":",
"filter_author",
"=",
"{",
"\"name\"",
":",
"author_field",
",",
"\"value\"",
":",
"to_refresh",
"}",
"for",
"item",
"in",
"update_items",
"(",
"filter_author",
")",
":",
"yield",
"item",
"total",
"+=",
"1",
"to_refresh",
"=",
"[",
"]",
"if",
"len",
"(",
"to_refresh",
")",
">",
"0",
":",
"filter_author",
"=",
"{",
"\"name\"",
":",
"author_field",
",",
"\"value\"",
":",
"to_refresh",
"}",
"for",
"item",
"in",
"update_items",
"(",
"filter_author",
")",
":",
"yield",
"item",
"total",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Total eitems refreshed for identities fields %i\"",
",",
"total",
")"
]
| Refresh identities in enriched index.
Retrieve items from the enriched index corresponding to enrich_backend,
and update their identities information, with fresh data from the
SortingHat database.
Instead of the whole index, only items matching the filter_author
filter are refreshed, if that parameter is not None.
:param enrich_backend: enriched backend to update
:param author_field: field to match items authored by a user
:param author_values: values of the authored field to match items | [
"Refresh",
"identities",
"in",
"enriched",
"index",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L309-L372 | train |
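The else branch above chunks the author values so that no single filter exceeds the maximum number of terms Elasticsearch accepts in one clause. A standalone sketch of just that batching, keeping the original's "greater than max_ids" check:

def author_filters(author_field, author_values, max_ids):
    to_refresh = []
    for author_value in author_values:
        to_refresh.append(author_value)
        if len(to_refresh) > max_ids:
            yield {"name": author_field, "value": to_refresh}
            to_refresh = []
    if to_refresh:
        yield {"name": author_field, "value": to_refresh}

for f in author_filters("author_uuid", ["u%d" % i for i in range(7)], max_ids=3):
    print(len(f["value"]), f["value"])
# 4 ['u0', 'u1', 'u2', 'u3']
# 3 ['u4', 'u5', 'u6']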
chaoss/grimoirelab-elk | grimoire_elk/elk.py | get_ocean_backend | def get_ocean_backend(backend_cmd, enrich_backend, no_incremental,
filter_raw=None, filter_raw_should=None):
""" Get the ocean backend configured to start from the last enriched date """
if no_incremental:
last_enrich = None
else:
last_enrich = get_last_enrich(backend_cmd, enrich_backend, filter_raw=filter_raw)
logger.debug("Last enrichment: %s", last_enrich)
backend = None
connector = get_connectors()[enrich_backend.get_connector_name()]
if backend_cmd:
backend_cmd = init_backend(backend_cmd)
backend = backend_cmd.backend
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
ocean_backend = connector[1](backend, from_date=last_enrich)
elif 'offset' in signature.parameters:
ocean_backend = connector[1](backend, offset=last_enrich)
else:
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
else:
# We can have params for non perceval backends also
params = enrich_backend.backend_params
if params:
try:
date_pos = params.index('--from-date')
last_enrich = parser.parse(params[date_pos + 1])
except ValueError:
pass
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
if filter_raw:
ocean_backend.set_filter_raw(filter_raw)
if filter_raw_should:
ocean_backend.set_filter_raw_should(filter_raw_should)
return ocean_backend | python | def get_ocean_backend(backend_cmd, enrich_backend, no_incremental,
filter_raw=None, filter_raw_should=None):
""" Get the ocean backend configured to start from the last enriched date """
if no_incremental:
last_enrich = None
else:
last_enrich = get_last_enrich(backend_cmd, enrich_backend, filter_raw=filter_raw)
logger.debug("Last enrichment: %s", last_enrich)
backend = None
connector = get_connectors()[enrich_backend.get_connector_name()]
if backend_cmd:
backend_cmd = init_backend(backend_cmd)
backend = backend_cmd.backend
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
ocean_backend = connector[1](backend, from_date=last_enrich)
elif 'offset' in signature.parameters:
ocean_backend = connector[1](backend, offset=last_enrich)
else:
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
else:
# We can have params for non perceval backends also
params = enrich_backend.backend_params
if params:
try:
date_pos = params.index('--from-date')
last_enrich = parser.parse(params[date_pos + 1])
except ValueError:
pass
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
if filter_raw:
ocean_backend.set_filter_raw(filter_raw)
if filter_raw_should:
ocean_backend.set_filter_raw_should(filter_raw_should)
return ocean_backend | [
"def",
"get_ocean_backend",
"(",
"backend_cmd",
",",
"enrich_backend",
",",
"no_incremental",
",",
"filter_raw",
"=",
"None",
",",
"filter_raw_should",
"=",
"None",
")",
":",
"if",
"no_incremental",
":",
"last_enrich",
"=",
"None",
"else",
":",
"last_enrich",
"=",
"get_last_enrich",
"(",
"backend_cmd",
",",
"enrich_backend",
",",
"filter_raw",
"=",
"filter_raw",
")",
"logger",
".",
"debug",
"(",
"\"Last enrichment: %s\"",
",",
"last_enrich",
")",
"backend",
"=",
"None",
"connector",
"=",
"get_connectors",
"(",
")",
"[",
"enrich_backend",
".",
"get_connector_name",
"(",
")",
"]",
"if",
"backend_cmd",
":",
"backend_cmd",
"=",
"init_backend",
"(",
"backend_cmd",
")",
"backend",
"=",
"backend_cmd",
".",
"backend",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"backend",
".",
"fetch",
")",
"if",
"'from_date'",
"in",
"signature",
".",
"parameters",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
",",
"from_date",
"=",
"last_enrich",
")",
"elif",
"'offset'",
"in",
"signature",
".",
"parameters",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
",",
"offset",
"=",
"last_enrich",
")",
"else",
":",
"if",
"last_enrich",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
",",
"from_date",
"=",
"last_enrich",
")",
"else",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
")",
"else",
":",
"# We can have params for non perceval backends also",
"params",
"=",
"enrich_backend",
".",
"backend_params",
"if",
"params",
":",
"try",
":",
"date_pos",
"=",
"params",
".",
"index",
"(",
"'--from-date'",
")",
"last_enrich",
"=",
"parser",
".",
"parse",
"(",
"params",
"[",
"date_pos",
"+",
"1",
"]",
")",
"except",
"ValueError",
":",
"pass",
"if",
"last_enrich",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
",",
"from_date",
"=",
"last_enrich",
")",
"else",
":",
"ocean_backend",
"=",
"connector",
"[",
"1",
"]",
"(",
"backend",
")",
"if",
"filter_raw",
":",
"ocean_backend",
".",
"set_filter_raw",
"(",
"filter_raw",
")",
"if",
"filter_raw_should",
":",
"ocean_backend",
".",
"set_filter_raw_should",
"(",
"filter_raw_should",
")",
"return",
"ocean_backend"
]
| Get the ocean backend configured to start from the last enriched date | [
"Get",
"the",
"ocean",
"backend",
"configured",
"to",
"start",
"from",
"the",
"last",
"enriched",
"date"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L439-L487 | train |
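A self-contained sketch of the signature inspection used in get_ocean_backend above to decide whether a backend's fetch() takes from_date or offset; the backend classes are hypothetical stand-ins, not real Perceval backends.

import inspect

class DateBackend:
    def fetch(self, category=None, from_date=None):
        return []

class OffsetBackend:
    def fetch(self, category=None, offset=None):
        return []

def fetch_kwargs(backend, last_enrich):
    # Build the kwargs for fetch() depending on its signature.
    params = inspect.signature(backend.fetch).parameters
    if 'from_date' in params:
        return {'from_date': last_enrich}
    elif 'offset' in params:
        return {'offset': last_enrich}
    return {}

print(fetch_kwargs(DateBackend(), "2020-01-01"))   # {'from_date': '2020-01-01'}
print(fetch_kwargs(OffsetBackend(), 1000))         # {'offset': 1000}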
chaoss/grimoirelab-elk | grimoire_elk/elk.py | do_studies | def do_studies(ocean_backend, enrich_backend, studies_args, retention_time=None):
"""Execute studies related to a given enrich backend. If `retention_time` is not None, the
study data is deleted based on the number of minutes declared in `retention_time`.
:param ocean_backend: backend to access raw items
:param enrich_backend: backend to access enriched items
:param retention_time: maximum number of minutes wrt the current date to retain the data
:param studies_args: list of studies to be executed
"""
for study in enrich_backend.studies:
selected_studies = [(s['name'], s['params']) for s in studies_args if s['type'] == study.__name__]
for (name, params) in selected_studies:
logger.info("Starting study: %s, params %s", name, str(params))
try:
study(ocean_backend, enrich_backend, **params)
except Exception as e:
logger.error("Problem executing study %s, %s", name, str(e))
raise e
# identify studies which creates other indexes. If the study is onion,
# it can be ignored since the index is recreated every week
if name.startswith('enrich_onion'):
continue
index_params = [p for p in params if 'out_index' in p]
for ip in index_params:
index_name = params[ip]
elastic = get_elastic(enrich_backend.elastic_url, index_name)
elastic.delete_items(retention_time) | python | def do_studies(ocean_backend, enrich_backend, studies_args, retention_time=None):
"""Execute studies related to a given enrich backend. If `retention_time` is not None, the
study data is deleted based on the number of minutes declared in `retention_time`.
:param ocean_backend: backend to access raw items
:param enrich_backend: backend to access enriched items
:param retention_time: maximum number of minutes wrt the current date to retain the data
:param studies_args: list of studies to be executed
"""
for study in enrich_backend.studies:
selected_studies = [(s['name'], s['params']) for s in studies_args if s['type'] == study.__name__]
for (name, params) in selected_studies:
logger.info("Starting study: %s, params %s", name, str(params))
try:
study(ocean_backend, enrich_backend, **params)
except Exception as e:
logger.error("Problem executing study %s, %s", name, str(e))
raise e
# identify studies which creates other indexes. If the study is onion,
# it can be ignored since the index is recreated every week
if name.startswith('enrich_onion'):
continue
index_params = [p for p in params if 'out_index' in p]
for ip in index_params:
index_name = params[ip]
elastic = get_elastic(enrich_backend.elastic_url, index_name)
elastic.delete_items(retention_time) | [
"def",
"do_studies",
"(",
"ocean_backend",
",",
"enrich_backend",
",",
"studies_args",
",",
"retention_time",
"=",
"None",
")",
":",
"for",
"study",
"in",
"enrich_backend",
".",
"studies",
":",
"selected_studies",
"=",
"[",
"(",
"s",
"[",
"'name'",
"]",
",",
"s",
"[",
"'params'",
"]",
")",
"for",
"s",
"in",
"studies_args",
"if",
"s",
"[",
"'type'",
"]",
"==",
"study",
".",
"__name__",
"]",
"for",
"(",
"name",
",",
"params",
")",
"in",
"selected_studies",
":",
"logger",
".",
"info",
"(",
"\"Starting study: %s, params %s\"",
",",
"name",
",",
"str",
"(",
"params",
")",
")",
"try",
":",
"study",
"(",
"ocean_backend",
",",
"enrich_backend",
",",
"*",
"*",
"params",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Problem executing study %s, %s\"",
",",
"name",
",",
"str",
"(",
"e",
")",
")",
"raise",
"e",
"# identify studies which creates other indexes. If the study is onion,",
"# it can be ignored since the index is recreated every week",
"if",
"name",
".",
"startswith",
"(",
"'enrich_onion'",
")",
":",
"continue",
"index_params",
"=",
"[",
"p",
"for",
"p",
"in",
"params",
"if",
"'out_index'",
"in",
"p",
"]",
"for",
"ip",
"in",
"index_params",
":",
"index_name",
"=",
"params",
"[",
"ip",
"]",
"elastic",
"=",
"get_elastic",
"(",
"enrich_backend",
".",
"elastic_url",
",",
"index_name",
")",
"elastic",
".",
"delete_items",
"(",
"retention_time",
")"
]
| Execute studies related to a given enrich backend. If `retention_time` is not None, the
study data is deleted based on the number of minutes declared in `retention_time`.
:param ocean_backend: backend to access raw items
:param enrich_backend: backend to access enriched items
:param retention_time: maximum number of minutes wrt the current date to retain the data
:param studies_args: list of studies to be executed | [
"Execute",
"studies",
"related",
"to",
"a",
"given",
"enrich",
"backend",
".",
"If",
"retention_time",
"is",
"not",
"None",
"the",
"study",
"data",
"is",
"deleted",
"based",
"on",
"the",
"number",
"of",
"minutes",
"declared",
"in",
"retention_time",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L490-L521 | train |
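A sketch of the assumed shape of studies_args and of how do_studies above matches its entries to study functions by __name__; the study function and its params are invented for illustration.

def enrich_demography(ocean_backend, enrich_backend, **params):
    # Stand-in for a real study registered in enrich_backend.studies.
    print("running demography with", params)

studies = [enrich_demography]          # stand-in for enrich_backend.studies
studies_args = [
    {"type": "enrich_demography",
     "name": "demography",
     "params": {"date_field": "grimoire_creation_date"}},
]

for study in studies:
    # Same selection criterion as do_studies(): match on the function name.
    selected = [(s["name"], s["params"]) for s in studies_args if s["type"] == study.__name__]
    for name, params in selected:
        study(None, None, **params)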
chaoss/grimoirelab-elk | grimoire_elk/elk.py | delete_orphan_unique_identities | def delete_orphan_unique_identities(es, sortinghat_db, current_data_source, active_data_sources):
"""Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param current_data_source: current data source
:param active_data_sources: list of active data sources
"""
def get_uuids_in_index(target_uuids):
"""Find a set of uuids in IDENTITIES_INDEX and return them if exist.
:param target_uuids: target uuids
"""
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"bool": {
"filter": [
{
"terms": {
"sh_uuid": target_uuids
}
}
]
}
}
}
)
hits = []
if page['hits']['total'] != 0:
hits = page['hits']['hits']
return hits
def delete_unique_identities(target_uuids):
"""Delete a list of uuids from SortingHat.
:param target_uuids: uuids to be deleted
"""
count = 0
for uuid in target_uuids:
success = SortingHat.remove_unique_identity(sortinghat_db, uuid)
count = count + 1 if success else count
return count
def delete_identities(unique_ident, data_sources):
"""Remove the identities in non active data sources.
:param unique_ident: unique identity object
:param data_sources: target data sources
"""
count = 0
for ident in unique_ident.identities:
if ident.source not in data_sources:
success = SortingHat.remove_identity(sortinghat_db, ident.id)
count = count + 1 if success else count
return count
def has_identities_in_data_sources(unique_ident, data_sources):
"""Check if a unique identity has identities in a set of data sources.
:param unique_ident: unique identity object
:param data_sources: target data sources
"""
in_active = False
for ident in unique_ident.identities:
if ident.source in data_sources:
in_active = True
break
return in_active
deleted_unique_identities = 0
deleted_identities = 0
uuids_to_process = []
# Collect all unique identities
for unique_identity in SortingHat.unique_identities(sortinghat_db):
# Remove a unique identity if all its identities are in non active data source
if not has_identities_in_data_sources(unique_identity, active_data_sources):
deleted_unique_identities += delete_unique_identities([unique_identity.uuid])
continue
# Remove the identities of non active data source for a given unique identity
deleted_identities += delete_identities(unique_identity, active_data_sources)
# Process only the unique identities that include the current data source, since
# it may be that unique identities in other data source have not been
# added yet to IDENTITIES_INDEX
if not has_identities_in_data_sources(unique_identity, [current_data_source]):
continue
# Add the uuid to the list to check its existence in the IDENTITIES_INDEX
uuids_to_process.append(unique_identity.uuid)
# Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX
if len(uuids_to_process) != SIZE_SCROLL_IDENTITIES_INDEX:
continue
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
# Reset the list
uuids_to_process = []
# Check that no uuids have been left to process
if uuids_to_process:
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
logger.debug("[identities retention] Total orphan unique identities deleted from SH: %i",
deleted_unique_identities)
logger.debug("[identities retention] Total identities in non-active data sources deleted from SH: %i",
deleted_identities) | python | def delete_orphan_unique_identities(es, sortinghat_db, current_data_source, active_data_sources):
"""Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param current_data_source: current data source
:param active_data_sources: list of active data sources
"""
def get_uuids_in_index(target_uuids):
"""Find a set of uuids in IDENTITIES_INDEX and return them if exist.
:param target_uuids: target uuids
"""
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"bool": {
"filter": [
{
"terms": {
"sh_uuid": target_uuids
}
}
]
}
}
}
)
hits = []
if page['hits']['total'] != 0:
hits = page['hits']['hits']
return hits
def delete_unique_identities(target_uuids):
"""Delete a list of uuids from SortingHat.
:param target_uuids: uuids to be deleted
"""
count = 0
for uuid in target_uuids:
success = SortingHat.remove_unique_identity(sortinghat_db, uuid)
count = count + 1 if success else count
return count
def delete_identities(unique_ident, data_sources):
"""Remove the identities in non active data sources.
:param unique_ident: unique identity object
:param data_sources: target data sources
"""
count = 0
for ident in unique_ident.identities:
if ident.source not in data_sources:
success = SortingHat.remove_identity(sortinghat_db, ident.id)
count = count + 1 if success else count
return count
def has_identities_in_data_sources(unique_ident, data_sources):
"""Check if a unique identity has identities in a set of data sources.
:param unique_ident: unique identity object
:param data_sources: target data sources
"""
in_active = False
for ident in unique_ident.identities:
if ident.source in data_sources:
in_active = True
break
return in_active
deleted_unique_identities = 0
deleted_identities = 0
uuids_to_process = []
# Collect all unique identities
for unique_identity in SortingHat.unique_identities(sortinghat_db):
# Remove a unique identity if all its identities are in non active data source
if not has_identities_in_data_sources(unique_identity, active_data_sources):
deleted_unique_identities += delete_unique_identities([unique_identity.uuid])
continue
# Remove the identities of non active data source for a given unique identity
deleted_identities += delete_identities(unique_identity, active_data_sources)
# Process only the unique identities that include the current data source, since
# it may be that unique identities in other data source have not been
# added yet to IDENTITIES_INDEX
if not has_identities_in_data_sources(unique_identity, [current_data_source]):
continue
# Add the uuid to the list to check its existence in the IDENTITIES_INDEX
uuids_to_process.append(unique_identity.uuid)
# Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX
if len(uuids_to_process) != SIZE_SCROLL_IDENTITIES_INDEX:
continue
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
# Reset the list
uuids_to_process = []
# Check that no uuids have been left to process
if uuids_to_process:
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
logger.debug("[identities retention] Total orphan unique identities deleted from SH: %i",
deleted_unique_identities)
logger.debug("[identities retention] Total identities in non-active data sources deleted from SH: %i",
deleted_identities) | [
"def",
"delete_orphan_unique_identities",
"(",
"es",
",",
"sortinghat_db",
",",
"current_data_source",
",",
"active_data_sources",
")",
":",
"def",
"get_uuids_in_index",
"(",
"target_uuids",
")",
":",
"\"\"\"Find a set of uuids in IDENTITIES_INDEX and return them if exist.\n\n :param target_uuids: target uuids\n \"\"\"",
"page",
"=",
"es",
".",
"search",
"(",
"index",
"=",
"IDENTITIES_INDEX",
",",
"scroll",
"=",
"\"360m\"",
",",
"size",
"=",
"SIZE_SCROLL_IDENTITIES_INDEX",
",",
"body",
"=",
"{",
"\"query\"",
":",
"{",
"\"bool\"",
":",
"{",
"\"filter\"",
":",
"[",
"{",
"\"terms\"",
":",
"{",
"\"sh_uuid\"",
":",
"target_uuids",
"}",
"}",
"]",
"}",
"}",
"}",
")",
"hits",
"=",
"[",
"]",
"if",
"page",
"[",
"'hits'",
"]",
"[",
"'total'",
"]",
"!=",
"0",
":",
"hits",
"=",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
"return",
"hits",
"def",
"delete_unique_identities",
"(",
"target_uuids",
")",
":",
"\"\"\"Delete a list of uuids from SortingHat.\n\n :param target_uuids: uuids to be deleted\n \"\"\"",
"count",
"=",
"0",
"for",
"uuid",
"in",
"target_uuids",
":",
"success",
"=",
"SortingHat",
".",
"remove_unique_identity",
"(",
"sortinghat_db",
",",
"uuid",
")",
"count",
"=",
"count",
"+",
"1",
"if",
"success",
"else",
"count",
"return",
"count",
"def",
"delete_identities",
"(",
"unique_ident",
",",
"data_sources",
")",
":",
"\"\"\"Remove the identities in non active data sources.\n\n :param unique_ident: unique identity object\n :param data_sources: target data sources\n \"\"\"",
"count",
"=",
"0",
"for",
"ident",
"in",
"unique_ident",
".",
"identities",
":",
"if",
"ident",
".",
"source",
"not",
"in",
"data_sources",
":",
"success",
"=",
"SortingHat",
".",
"remove_identity",
"(",
"sortinghat_db",
",",
"ident",
".",
"id",
")",
"count",
"=",
"count",
"+",
"1",
"if",
"success",
"else",
"count",
"return",
"count",
"def",
"has_identities_in_data_sources",
"(",
"unique_ident",
",",
"data_sources",
")",
":",
"\"\"\"Check if a unique identity has identities in a set of data sources.\n\n :param unique_ident: unique identity object\n :param data_sources: target data sources\n \"\"\"",
"in_active",
"=",
"False",
"for",
"ident",
"in",
"unique_ident",
".",
"identities",
":",
"if",
"ident",
".",
"source",
"in",
"data_sources",
":",
"in_active",
"=",
"True",
"break",
"return",
"in_active",
"deleted_unique_identities",
"=",
"0",
"deleted_identities",
"=",
"0",
"uuids_to_process",
"=",
"[",
"]",
"# Collect all unique identities",
"for",
"unique_identity",
"in",
"SortingHat",
".",
"unique_identities",
"(",
"sortinghat_db",
")",
":",
"# Remove a unique identity if all its identities are in non active data source",
"if",
"not",
"has_identities_in_data_sources",
"(",
"unique_identity",
",",
"active_data_sources",
")",
":",
"deleted_unique_identities",
"+=",
"delete_unique_identities",
"(",
"[",
"unique_identity",
".",
"uuid",
"]",
")",
"continue",
"# Remove the identities of non active data source for a given unique identity",
"deleted_identities",
"+=",
"delete_identities",
"(",
"unique_identity",
",",
"active_data_sources",
")",
"# Process only the unique identities that include the current data source, since",
"# it may be that unique identities in other data source have not been",
"# added yet to IDENTITIES_INDEX",
"if",
"not",
"has_identities_in_data_sources",
"(",
"unique_identity",
",",
"[",
"current_data_source",
"]",
")",
":",
"continue",
"# Add the uuid to the list to check its existence in the IDENTITIES_INDEX",
"uuids_to_process",
".",
"append",
"(",
"unique_identity",
".",
"uuid",
")",
"# Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX",
"if",
"len",
"(",
"uuids_to_process",
")",
"!=",
"SIZE_SCROLL_IDENTITIES_INDEX",
":",
"continue",
"# Find which uuids to be processed exist in IDENTITIES_INDEX",
"results",
"=",
"get_uuids_in_index",
"(",
"uuids_to_process",
")",
"uuids_found",
"=",
"[",
"item",
"[",
"'_source'",
"]",
"[",
"'sh_uuid'",
"]",
"for",
"item",
"in",
"results",
"]",
"# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX",
"orphan_uuids",
"=",
"set",
"(",
"uuids_to_process",
")",
"-",
"set",
"(",
"uuids_found",
")",
"# Delete the orphan uuids from SortingHat",
"deleted_unique_identities",
"+=",
"delete_unique_identities",
"(",
"orphan_uuids",
")",
"# Reset the list",
"uuids_to_process",
"=",
"[",
"]",
"# Check that no uuids have been left to process",
"if",
"uuids_to_process",
":",
"# Find which uuids to be processed exist in IDENTITIES_INDEX",
"results",
"=",
"get_uuids_in_index",
"(",
"uuids_to_process",
")",
"uuids_found",
"=",
"[",
"item",
"[",
"'_source'",
"]",
"[",
"'sh_uuid'",
"]",
"for",
"item",
"in",
"results",
"]",
"# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX",
"orphan_uuids",
"=",
"set",
"(",
"uuids_to_process",
")",
"-",
"set",
"(",
"uuids_found",
")",
"# Delete the orphan uuids from SortingHat",
"deleted_unique_identities",
"+=",
"delete_unique_identities",
"(",
"orphan_uuids",
")",
"logger",
".",
"debug",
"(",
"\"[identities retention] Total orphan unique identities deleted from SH: %i\"",
",",
"deleted_unique_identities",
")",
"logger",
".",
"debug",
"(",
"\"[identities retention] Total identities in non-active data sources deleted from SH: %i\"",
",",
"deleted_identities",
")"
]
| Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param current_data_source: current data source
:param active_data_sources: list of active data sources | [
"Delete",
"all",
"unique",
"identities",
"which",
"appear",
"in",
"SortingHat",
"but",
"not",
"in",
"the",
"IDENTITIES_INDEX",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L671-L804 | train |
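A stripped-down sketch of the accumulate-and-flush pattern delete_orphan_unique_identities uses to process uuids in fixed-size blocks, including the final flush for the remainder; the block size and sample values are placeholders.

BLOCK_SIZE = 3          # stands in for SIZE_SCROLL_IDENTITIES_INDEX

def process_block(uuids):
    print("processing", uuids)

pending = []
for uuid in ["a", "b", "c", "d", "e"]:
    pending.append(uuid)
    if len(pending) != BLOCK_SIZE:
        continue
    process_block(pending)   # full block reached
    pending = []

if pending:                  # remainder smaller than a full block
    process_block(pending)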
chaoss/grimoirelab-elk | grimoire_elk/elk.py | delete_inactive_unique_identities | def delete_inactive_unique_identities(es, sortinghat_db, before_date):
"""Select the unique identities not seen before `before_date` and
delete them from SortingHat.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param before_date: datetime str to filter the identities
"""
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"range": {
"last_seen": {
"lte": before_date
}
}
}
}
)
sid = page['_scroll_id']
scroll_size = page['hits']['total']
if scroll_size == 0:
logging.warning("[identities retention] No inactive identities found in %s after %s!",
IDENTITIES_INDEX, before_date)
return
count = 0
while scroll_size > 0:
for item in page['hits']['hits']:
to_delete = item['_source']['sh_uuid']
success = SortingHat.remove_unique_identity(sortinghat_db, to_delete)
# increment the number of deleted identities only if the corresponding command was successful
count = count + 1 if success else count
page = es.scroll(scroll_id=sid, scroll='60m')
sid = page['_scroll_id']
scroll_size = len(page['hits']['hits'])
logger.debug("[identities retention] Total inactive identities deleted from SH: %i", count) | python | def delete_inactive_unique_identities(es, sortinghat_db, before_date):
"""Select the unique identities not seen before `before_date` and
delete them from SortingHat.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param before_date: datetime str to filter the identities
"""
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"range": {
"last_seen": {
"lte": before_date
}
}
}
}
)
sid = page['_scroll_id']
scroll_size = page['hits']['total']
if scroll_size == 0:
logging.warning("[identities retention] No inactive identities found in %s after %s!",
IDENTITIES_INDEX, before_date)
return
count = 0
while scroll_size > 0:
for item in page['hits']['hits']:
to_delete = item['_source']['sh_uuid']
success = SortingHat.remove_unique_identity(sortinghat_db, to_delete)
# increment the number of deleted identities only if the corresponding command was successful
count = count + 1 if success else count
page = es.scroll(scroll_id=sid, scroll='60m')
sid = page['_scroll_id']
scroll_size = len(page['hits']['hits'])
logger.debug("[identities retention] Total inactive identities deleted from SH: %i", count) | [
"def",
"delete_inactive_unique_identities",
"(",
"es",
",",
"sortinghat_db",
",",
"before_date",
")",
":",
"page",
"=",
"es",
".",
"search",
"(",
"index",
"=",
"IDENTITIES_INDEX",
",",
"scroll",
"=",
"\"360m\"",
",",
"size",
"=",
"SIZE_SCROLL_IDENTITIES_INDEX",
",",
"body",
"=",
"{",
"\"query\"",
":",
"{",
"\"range\"",
":",
"{",
"\"last_seen\"",
":",
"{",
"\"lte\"",
":",
"before_date",
"}",
"}",
"}",
"}",
")",
"sid",
"=",
"page",
"[",
"'_scroll_id'",
"]",
"scroll_size",
"=",
"page",
"[",
"'hits'",
"]",
"[",
"'total'",
"]",
"if",
"scroll_size",
"==",
"0",
":",
"logging",
".",
"warning",
"(",
"\"[identities retention] No inactive identities found in %s after %s!\"",
",",
"IDENTITIES_INDEX",
",",
"before_date",
")",
"return",
"count",
"=",
"0",
"while",
"scroll_size",
">",
"0",
":",
"for",
"item",
"in",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
":",
"to_delete",
"=",
"item",
"[",
"'_source'",
"]",
"[",
"'sh_uuid'",
"]",
"success",
"=",
"SortingHat",
".",
"remove_unique_identity",
"(",
"sortinghat_db",
",",
"to_delete",
")",
"# increment the number of deleted identities only if the corresponding command was successful",
"count",
"=",
"count",
"+",
"1",
"if",
"success",
"else",
"count",
"page",
"=",
"es",
".",
"scroll",
"(",
"scroll_id",
"=",
"sid",
",",
"scroll",
"=",
"'60m'",
")",
"sid",
"=",
"page",
"[",
"'_scroll_id'",
"]",
"scroll_size",
"=",
"len",
"(",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
")",
"logger",
".",
"debug",
"(",
"\"[identities retention] Total inactive identities deleted from SH: %i\"",
",",
"count",
")"
]
| Select the unique identities not seen before `before_date` and
delete them from SortingHat.
:param es: ElasticSearchDSL object
:param sortinghat_db: instance of the SortingHat database
:param before_date: datetime str to filter the identities | [
"Select",
"the",
"unique",
"identities",
"not",
"seen",
"before",
"before_date",
"and",
"delete",
"them",
"from",
"SortingHat",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L807-L851 | train |
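A sketch of the scroll pagination pattern above, assuming the pre-8.x elasticsearch-py client used in this module and a reachable cluster; the URL, index name, field and date are placeholders.

from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])
page = es.search(index="grimoirelab_identities_cache", scroll="10m", size=100,
                 body={"query": {"range": {"last_seen": {"lte": "2020-01-01"}}}})
sid = page['_scroll_id']

# Iterate until a scroll page comes back empty; note that page['hits']['total']
# is an int on ES 6 and a dict on ES 7, so the hits list is checked instead.
while page['hits']['hits']:
    for item in page['hits']['hits']:
        print(item['_source'].get('sh_uuid'))
    page = es.scroll(scroll_id=sid, scroll='10m')
    sid = page['_scroll_id']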
chaoss/grimoirelab-elk | grimoire_elk/elk.py | retain_identities | def retain_identities(retention_time, es_enrichment_url, sortinghat_db, data_source, active_data_sources):
"""Select the unique identities not seen before `retention_time` and
delete them from SortingHat. Furthermore, it also deletes the orphan unique identities,
those stored in SortingHat but not in IDENTITIES_INDEX.
:param retention_time: maximum number of minutes wrt the current date to retain the identities
:param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored
:param sortinghat_db: instance of the SortingHat database
:param data_source: target data source (e.g., git, github, slack)
:param active_data_sources: list of active data sources
"""
before_date = get_diff_current_date(minutes=retention_time)
before_date_str = before_date.isoformat()
es = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20, retry_on_timeout=True, verify_certs=False)
# delete the unique identities which have not been seen after `before_date`
delete_inactive_unique_identities(es, sortinghat_db, before_date_str)
# delete the unique identities for a given data source which are not in the IDENTITIES_INDEX
delete_orphan_unique_identities(es, sortinghat_db, data_source, active_data_sources) | python | def retain_identities(retention_time, es_enrichment_url, sortinghat_db, data_source, active_data_sources):
"""Select the unique identities not seen before `retention_time` and
delete them from SortingHat. Furthermore, it also deletes the orphan unique identities,
those stored in SortingHat but not in IDENTITIES_INDEX.
:param retention_time: maximum number of minutes wrt the current date to retain the identities
:param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored
:param sortinghat_db: instance of the SortingHat database
:param data_source: target data source (e.g., git, github, slack)
:param active_data_sources: list of active data sources
"""
before_date = get_diff_current_date(minutes=retention_time)
before_date_str = before_date.isoformat()
es = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20, retry_on_timeout=True, verify_certs=False)
# delete the unique identities which have not been seen after `before_date`
delete_inactive_unique_identities(es, sortinghat_db, before_date_str)
# delete the unique identities for a given data source which are not in the IDENTITIES_INDEX
delete_orphan_unique_identities(es, sortinghat_db, data_source, active_data_sources) | [
"def",
"retain_identities",
"(",
"retention_time",
",",
"es_enrichment_url",
",",
"sortinghat_db",
",",
"data_source",
",",
"active_data_sources",
")",
":",
"before_date",
"=",
"get_diff_current_date",
"(",
"minutes",
"=",
"retention_time",
")",
"before_date_str",
"=",
"before_date",
".",
"isoformat",
"(",
")",
"es",
"=",
"Elasticsearch",
"(",
"[",
"es_enrichment_url",
"]",
",",
"timeout",
"=",
"120",
",",
"max_retries",
"=",
"20",
",",
"retry_on_timeout",
"=",
"True",
",",
"verify_certs",
"=",
"False",
")",
"# delete the unique identities which have not been seen after `before_date`",
"delete_inactive_unique_identities",
"(",
"es",
",",
"sortinghat_db",
",",
"before_date_str",
")",
"# delete the unique identities for a given data source which are not in the IDENTITIES_INDEX",
"delete_orphan_unique_identities",
"(",
"es",
",",
"sortinghat_db",
",",
"data_source",
",",
"active_data_sources",
")"
]
| Select the unique identities not seen before `retention_time` and
delete them from SortingHat. Furthermore, it also deletes the orphan unique identities,
those stored in SortingHat but not in IDENTITIES_INDEX.
:param retention_time: maximum number of minutes wrt the current date to retain the identities
:param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored
:param sortinghat_db: instance of the SortingHat database
:param data_source: target data source (e.g., git, github, slack)
:param active_data_sources: list of active data sources | [
"Select",
"the",
"unique",
"identities",
"not",
"seen",
"before",
"retention_time",
"and",
"delete",
"them",
"from",
"SortingHat",
".",
"Furthermore",
"it",
"deletes",
"also",
"the",
"orphan",
"unique",
"identities",
"those",
"ones",
"stored",
"in",
"SortingHat",
"but",
"not",
"in",
"IDENTITIES_INDEX",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L854-L873 | train |
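A sketch of the cutoff-date computation retain_identities relies on; get_diff_current_date() is assumed to subtract the given number of minutes from the current time, so a local stand-in is shown here.

from datetime import datetime, timedelta, timezone

def diff_current_date(minutes):
    # Stand-in for get_diff_current_date(): now minus the retention window.
    return datetime.now(timezone.utc) - timedelta(minutes=minutes)

retention_time = 60 * 24 * 90          # keep identities seen in the last 90 days
before_date = diff_current_date(retention_time)
print(before_date.isoformat())         # e.g. 2023-01-15T10:30:00.123456+00:00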
chaoss/grimoirelab-elk | grimoire_elk/elk.py | init_backend | def init_backend(backend_cmd):
"""Init backend within the backend_cmd"""
try:
backend_cmd.backend
except AttributeError:
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND,
parsed_args)
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
return backend_cmd | python | def init_backend(backend_cmd):
"""Init backend within the backend_cmd"""
try:
backend_cmd.backend
except AttributeError:
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND,
parsed_args)
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
return backend_cmd | [
"def",
"init_backend",
"(",
"backend_cmd",
")",
":",
"try",
":",
"backend_cmd",
".",
"backend",
"except",
"AttributeError",
":",
"parsed_args",
"=",
"vars",
"(",
"backend_cmd",
".",
"parsed_args",
")",
"init_args",
"=",
"find_signature_parameters",
"(",
"backend_cmd",
".",
"BACKEND",
",",
"parsed_args",
")",
"backend_cmd",
".",
"backend",
"=",
"backend_cmd",
".",
"BACKEND",
"(",
"*",
"*",
"init_args",
")",
"return",
"backend_cmd"
]
| Init backend within the backend_cmd | [
"Init",
"backend",
"within",
"the",
"backend_cmd"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L876-L887 | train |
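A sketch of the argument filtering that init_backend delegates to find_signature_parameters(): keep only the keys the callable accepts; the backend class and parsed arguments are hypothetical.

import inspect

class FakeBackend:
    def __init__(self, uri, tag=None):
        self.uri = uri
        self.tag = tag

parsed_args = {'uri': 'https://example.org/repo.git', 'tag': 'demo', 'debug': True}
accepted = inspect.signature(FakeBackend).parameters
init_args = {k: v for k, v in parsed_args.items() if k in accepted}
backend = FakeBackend(**init_args)     # 'debug' is silently dropped
print(backend.uri, backend.tag)        # https://example.org/repo.git demo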
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.safe_index | def safe_index(cls, unique_id):
""" Return a valid elastic index generated from unique_id """
index = unique_id
if unique_id:
index = unique_id.replace("/", "_").lower()
return index | python | def safe_index(cls, unique_id):
""" Return a valid elastic index generated from unique_id """
index = unique_id
if unique_id:
index = unique_id.replace("/", "_").lower()
return index | [
"def",
"safe_index",
"(",
"cls",
",",
"unique_id",
")",
":",
"index",
"=",
"unique_id",
"if",
"unique_id",
":",
"index",
"=",
"unique_id",
".",
"replace",
"(",
"\"/\"",
",",
"\"_\"",
")",
".",
"lower",
"(",
")",
"return",
"index"
]
| Return a valid elastic index generated from unique_id | [
"Return",
"a",
"valid",
"elastic",
"index",
"generated",
"from",
"unique_id"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L112-L117 | train |
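Usage sketch for safe_index(), assuming grimoire_elk is installed and that the method is exposed as a classmethod (as the cls parameter suggests).

from grimoire_elk.elastic import ElasticSearch

print(ElasticSearch.safe_index("GrimoireLab/Perceval"))   # grimoirelab_perceval
print(ElasticSearch.safe_index(None))                     # None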
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch._check_instance | def _check_instance(url, insecure):
"""Checks if there is an instance of Elasticsearch in url.
Actually, it checks if GET on the url returns a JSON document
with a field tagline "You know, for search",
and a field version.number.
:value url: url of the instance to check
:value insecure: don't verify ssl connection (boolean)
:returns: major version of Elasticsearch, as string.
"""
res = grimoire_con(insecure).get(url)
if res.status_code != 200:
logger.error("Didn't get 200 OK from url %s", url)
raise ElasticConnectException
else:
try:
version_str = res.json()['version']['number']
version_major = version_str.split('.')[0]
return version_major
except Exception:
logger.error("Could not read proper welcome message from url %s",
ElasticSearch.anonymize_url(url))
logger.error("Message read: %s", res.text)
raise ElasticConnectException | python | def _check_instance(url, insecure):
"""Checks if there is an instance of Elasticsearch in url.
Actually, it checks if GET on the url returns a JSON document
with a field tagline "You know, for search",
and a field version.number.
:value url: url of the instance to check
:value insecure: don't verify ssl connection (boolean)
:returns: major version of Elasticsearch, as string.
"""
res = grimoire_con(insecure).get(url)
if res.status_code != 200:
logger.error("Didn't get 200 OK from url %s", url)
raise ElasticConnectException
else:
try:
version_str = res.json()['version']['number']
version_major = version_str.split('.')[0]
return version_major
except Exception:
logger.error("Could not read proper welcome message from url %s",
ElasticSearch.anonymize_url(url))
logger.error("Message read: %s", res.text)
raise ElasticConnectException | [
"def",
"_check_instance",
"(",
"url",
",",
"insecure",
")",
":",
"res",
"=",
"grimoire_con",
"(",
"insecure",
")",
".",
"get",
"(",
"url",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"logger",
".",
"error",
"(",
"\"Didn't get 200 OK from url %s\"",
",",
"url",
")",
"raise",
"ElasticConnectException",
"else",
":",
"try",
":",
"version_str",
"=",
"res",
".",
"json",
"(",
")",
"[",
"'version'",
"]",
"[",
"'number'",
"]",
"version_major",
"=",
"version_str",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"version_major",
"except",
"Exception",
":",
"logger",
".",
"error",
"(",
"\"Could not read proper welcome message from url %s\"",
",",
"ElasticSearch",
".",
"anonymize_url",
"(",
"url",
")",
")",
"logger",
".",
"error",
"(",
"\"Message read: %s\"",
",",
"res",
".",
"text",
")",
"raise",
"ElasticConnectException"
]
| Checks if there is an instance of Elasticsearch in url.
Actually, it checks if GET on the url returns a JSON document
with a field tagline "You know, for search",
and a field version.number.
:value url: url of the instance to check
:value insecure: don't verify ssl connection (boolean)
:returns: major version of Elasticsearch, as string.
"Checks",
"if",
"there",
"is",
"an",
"instance",
"of",
"Elasticsearch",
"in",
"url",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L120-L145 | train |
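A minimal sketch of the same instance check done with plain requests; the URL is a placeholder and verify=not insecure mirrors the insecure flag used above.

import requests

def es_major_version(url, insecure=False):
    # GET the cluster root and read version.number from the welcome document.
    res = requests.get(url, verify=not insecure)
    res.raise_for_status()
    return res.json()['version']['number'].split('.')[0]

# print(es_major_version("http://localhost:9200"))   # e.g. '6' or '7'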
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.safe_put_bulk | def safe_put_bulk(self, url, bulk_json):
""" Bulk PUT controlling unicode issues """
headers = {"Content-Type": "application/x-ndjson"}
try:
res = self.requests.put(url + '?refresh=true', data=bulk_json, headers=headers)
res.raise_for_status()
except UnicodeEncodeError:
# Related to body.encode('iso-8859-1'). mbox data
logger.error("Encondig error ... converting bulk to iso-8859-1")
bulk_json = bulk_json.encode('iso-8859-1', 'ignore')
res = self.requests.put(url, data=bulk_json, headers=headers)
res.raise_for_status()
result = res.json()
failed_items = []
if result['errors']:
# Due to multiple errors that may be thrown when inserting bulk data, only the first error is returned
failed_items = [item['index'] for item in result['items'] if 'error' in item['index']]
error = str(failed_items[0]['error'])
logger.error("Failed to insert data to ES: %s, %s", error, self.anonymize_url(url))
inserted_items = len(result['items']) - len(failed_items)
# The exception is currently not thrown to avoid stopping ocean uploading processes
try:
if failed_items:
raise ELKError(cause=error)
except ELKError:
pass
logger.debug("%i items uploaded to ES (%s)", inserted_items, self.anonymize_url(url))
return inserted_items | python | def safe_put_bulk(self, url, bulk_json):
""" Bulk PUT controlling unicode issues """
headers = {"Content-Type": "application/x-ndjson"}
try:
res = self.requests.put(url + '?refresh=true', data=bulk_json, headers=headers)
res.raise_for_status()
except UnicodeEncodeError:
# Related to body.encode('iso-8859-1'). mbox data
logger.error("Encondig error ... converting bulk to iso-8859-1")
bulk_json = bulk_json.encode('iso-8859-1', 'ignore')
res = self.requests.put(url, data=bulk_json, headers=headers)
res.raise_for_status()
result = res.json()
failed_items = []
if result['errors']:
# Due to multiple errors that may be thrown when inserting bulk data, only the first error is returned
failed_items = [item['index'] for item in result['items'] if 'error' in item['index']]
error = str(failed_items[0]['error'])
logger.error("Failed to insert data to ES: %s, %s", error, self.anonymize_url(url))
inserted_items = len(result['items']) - len(failed_items)
# The exception is currently not thrown to avoid stopping ocean uploading processes
try:
if failed_items:
raise ELKError(cause=error)
except ELKError:
pass
logger.debug("%i items uploaded to ES (%s)", inserted_items, self.anonymize_url(url))
return inserted_items | [
"def",
"safe_put_bulk",
"(",
"self",
",",
"url",
",",
"bulk_json",
")",
":",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/x-ndjson\"",
"}",
"try",
":",
"res",
"=",
"self",
".",
"requests",
".",
"put",
"(",
"url",
"+",
"'?refresh=true'",
",",
"data",
"=",
"bulk_json",
",",
"headers",
"=",
"headers",
")",
"res",
".",
"raise_for_status",
"(",
")",
"except",
"UnicodeEncodeError",
":",
"# Related to body.encode('iso-8859-1'). mbox data",
"logger",
".",
"error",
"(",
"\"Encondig error ... converting bulk to iso-8859-1\"",
")",
"bulk_json",
"=",
"bulk_json",
".",
"encode",
"(",
"'iso-8859-1'",
",",
"'ignore'",
")",
"res",
"=",
"self",
".",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"bulk_json",
",",
"headers",
"=",
"headers",
")",
"res",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"res",
".",
"json",
"(",
")",
"failed_items",
"=",
"[",
"]",
"if",
"result",
"[",
"'errors'",
"]",
":",
"# Due to multiple errors that may be thrown when inserting bulk data, only the first error is returned",
"failed_items",
"=",
"[",
"item",
"[",
"'index'",
"]",
"for",
"item",
"in",
"result",
"[",
"'items'",
"]",
"if",
"'error'",
"in",
"item",
"[",
"'index'",
"]",
"]",
"error",
"=",
"str",
"(",
"failed_items",
"[",
"0",
"]",
"[",
"'error'",
"]",
")",
"logger",
".",
"error",
"(",
"\"Failed to insert data to ES: %s, %s\"",
",",
"error",
",",
"self",
".",
"anonymize_url",
"(",
"url",
")",
")",
"inserted_items",
"=",
"len",
"(",
"result",
"[",
"'items'",
"]",
")",
"-",
"len",
"(",
"failed_items",
")",
"# The exception is currently not thrown to avoid stopping ocean uploading processes",
"try",
":",
"if",
"failed_items",
":",
"raise",
"ELKError",
"(",
"cause",
"=",
"error",
")",
"except",
"ELKError",
":",
"pass",
"logger",
".",
"debug",
"(",
"\"%i items uploaded to ES (%s)\"",
",",
"inserted_items",
",",
"self",
".",
"anonymize_url",
"(",
"url",
")",
")",
"return",
"inserted_items"
]
| Bulk PUT controlling unicode issues | [
"Bulk",
"PUT",
"controlling",
"unicode",
"issues"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L153-L187 | train |
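A sketch of the newline-delimited JSON (ndjson) body that the Elasticsearch _bulk endpoint expects, built the same way as in safe_put_bulk above; the sample items are invented.

import json

items = [{"uuid": "1", "title": "first"}, {"uuid": "2", "title": "second"}]

bulk_json = ""
for item in items:
    bulk_json += '{"index" : {"_id" : "%s" } }\n' % item["uuid"]   # action line
    bulk_json += json.dumps(item) + "\n"                           # source line

print(bulk_json)
# The payload is then sent as in safe_put_bulk():
#   requests.put(index_url + '/items/_bulk?refresh=true', data=bulk_json,
#                headers={"Content-Type": "application/x-ndjson"})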
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.all_es_aliases | def all_es_aliases(self):
"""List all aliases used in ES"""
r = self.requests.get(self.url + "/_aliases", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = []
for index in r.json().keys():
aliases.extend(list(r.json()[index]['aliases'].keys()))
aliases = list(set(aliases))
return aliases | python | def all_es_aliases(self):
"""List all aliases used in ES"""
r = self.requests.get(self.url + "/_aliases", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = []
for index in r.json().keys():
aliases.extend(list(r.json()[index]['aliases'].keys()))
aliases = list(set(aliases))
return aliases | [
"def",
"all_es_aliases",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"self",
".",
"url",
"+",
"\"/_aliases\"",
",",
"headers",
"=",
"HEADER_JSON",
",",
"verify",
"=",
"False",
")",
"try",
":",
"r",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"Something went wrong when retrieving aliases on %s.\"",
",",
"self",
".",
"anonymize_url",
"(",
"self",
".",
"index_url",
")",
")",
"logger",
".",
"warning",
"(",
"ex",
")",
"return",
"aliases",
"=",
"[",
"]",
"for",
"index",
"in",
"r",
".",
"json",
"(",
")",
".",
"keys",
"(",
")",
":",
"aliases",
".",
"extend",
"(",
"list",
"(",
"r",
".",
"json",
"(",
")",
"[",
"index",
"]",
"[",
"'aliases'",
"]",
".",
"keys",
"(",
")",
")",
")",
"aliases",
"=",
"list",
"(",
"set",
"(",
"aliases",
")",
")",
"return",
"aliases"
]
| List all aliases used in ES | [
"List",
"all",
"aliases",
"used",
"in",
"ES"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L189-L206 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.list_aliases | def list_aliases(self):
"""List aliases linked to the index"""
# check alias doesn't exist
r = self.requests.get(self.index_url + "/_alias", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = r.json()[self.index]['aliases']
return aliases | python | def list_aliases(self):
"""List aliases linked to the index"""
# check alias doesn't exist
r = self.requests.get(self.index_url + "/_alias", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = r.json()[self.index]['aliases']
return aliases | [
"def",
"list_aliases",
"(",
"self",
")",
":",
"# check alias doesn't exist",
"r",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"self",
".",
"index_url",
"+",
"\"/_alias\"",
",",
"headers",
"=",
"HEADER_JSON",
",",
"verify",
"=",
"False",
")",
"try",
":",
"r",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"Something went wrong when retrieving aliases on %s.\"",
",",
"self",
".",
"anonymize_url",
"(",
"self",
".",
"index_url",
")",
")",
"logger",
".",
"warning",
"(",
"ex",
")",
"return",
"aliases",
"=",
"r",
".",
"json",
"(",
")",
"[",
"self",
".",
"index",
"]",
"[",
"'aliases'",
"]",
"return",
"aliases"
]
| List aliases linked to the index | [
"List",
"aliases",
"linked",
"to",
"the",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L208-L222 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.bulk_upload | def bulk_upload(self, items, field_id):
"""Upload in controlled packs items to ES using bulk API"""
current = 0
new_items = 0 # total items added with bulk
bulk_json = ""
if not items:
return new_items
url = self.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.anonymize_url(url), self.max_items_bulk)
task_init = time()
for item in items:
if current >= self.max_items_bulk:
task_init = time()
new_items += self.safe_put_bulk(url, bulk_json)
current = 0
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
bulk_json = ""
data_json = json.dumps(item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % (item[field_id])
bulk_json += data_json + "\n" # Bulk document
current += 1
if current > 0:
new_items += self.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec prev, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
return new_items | python | def bulk_upload(self, items, field_id):
"""Upload in controlled packs items to ES using bulk API"""
current = 0
new_items = 0 # total items added with bulk
bulk_json = ""
if not items:
return new_items
url = self.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.anonymize_url(url), self.max_items_bulk)
task_init = time()
for item in items:
if current >= self.max_items_bulk:
task_init = time()
new_items += self.safe_put_bulk(url, bulk_json)
current = 0
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
bulk_json = ""
data_json = json.dumps(item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % (item[field_id])
bulk_json += data_json + "\n" # Bulk document
current += 1
if current > 0:
new_items += self.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec prev, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
return new_items | [
"def",
"bulk_upload",
"(",
"self",
",",
"items",
",",
"field_id",
")",
":",
"current",
"=",
"0",
"new_items",
"=",
"0",
"# total items added with bulk",
"bulk_json",
"=",
"\"\"",
"if",
"not",
"items",
":",
"return",
"new_items",
"url",
"=",
"self",
".",
"index_url",
"+",
"'/items/_bulk'",
"logger",
".",
"debug",
"(",
"\"Adding items to %s (in %i packs)\"",
",",
"self",
".",
"anonymize_url",
"(",
"url",
")",
",",
"self",
".",
"max_items_bulk",
")",
"task_init",
"=",
"time",
"(",
")",
"for",
"item",
"in",
"items",
":",
"if",
"current",
">=",
"self",
".",
"max_items_bulk",
":",
"task_init",
"=",
"time",
"(",
")",
"new_items",
"+=",
"self",
".",
"safe_put_bulk",
"(",
"url",
",",
"bulk_json",
")",
"current",
"=",
"0",
"json_size",
"=",
"sys",
".",
"getsizeof",
"(",
"bulk_json",
")",
"/",
"(",
"1024",
"*",
"1024",
")",
"logger",
".",
"debug",
"(",
"\"bulk packet sent (%.2f sec, %i total, %.2f MB)\"",
"%",
"(",
"time",
"(",
")",
"-",
"task_init",
",",
"new_items",
",",
"json_size",
")",
")",
"bulk_json",
"=",
"\"\"",
"data_json",
"=",
"json",
".",
"dumps",
"(",
"item",
")",
"bulk_json",
"+=",
"'{\"index\" : {\"_id\" : \"%s\" } }\\n'",
"%",
"(",
"item",
"[",
"field_id",
"]",
")",
"bulk_json",
"+=",
"data_json",
"+",
"\"\\n\"",
"# Bulk document",
"current",
"+=",
"1",
"if",
"current",
">",
"0",
":",
"new_items",
"+=",
"self",
".",
"safe_put_bulk",
"(",
"url",
",",
"bulk_json",
")",
"json_size",
"=",
"sys",
".",
"getsizeof",
"(",
"bulk_json",
")",
"/",
"(",
"1024",
"*",
"1024",
")",
"logger",
".",
"debug",
"(",
"\"bulk packet sent (%.2f sec prev, %i total, %.2f MB)\"",
"%",
"(",
"time",
"(",
")",
"-",
"task_init",
",",
"new_items",
",",
"json_size",
")",
")",
"return",
"new_items"
]
| Upload in controlled packs items to ES using bulk API | [
"Upload",
"in",
"controlled",
"packs",
"items",
"to",
"ES",
"using",
"bulk",
"API"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L270-L305 | train |
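A generic sketch of chunking an item stream into fixed-size packs, the same idea bulk_upload implements with a counter; the pack size and data are illustrative.

from itertools import islice

def packs(items, size):
    # Yield lists of at most `size` items until the iterator is exhausted.
    it = iter(items)
    while True:
        pack = list(islice(it, size))
        if not pack:
            break
        yield pack

for pack in packs(range(7), 3):
    print(pack)        # [0, 1, 2] then [3, 4, 5] then [6]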
chaoss/grimoirelab-elk | grimoire_elk/elastic.py | ElasticSearch.all_properties | def all_properties(self):
"""Get all properties of a given index"""
properties = {}
r = self.requests.get(self.index_url + "/_mapping", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
r_json = r.json()
if 'items' not in r_json[self.index]['mappings']:
return properties
if 'properties' not in r_json[self.index]['mappings']['items']:
return properties
properties = r_json[self.index]['mappings']['items']['properties']
except requests.exceptions.HTTPError as ex:
logger.error("Error all attributes for %s.", self.anonymize_url(self.index_url))
logger.error(ex)
return
return properties | python | def all_properties(self):
"""Get all properties of a given index"""
properties = {}
r = self.requests.get(self.index_url + "/_mapping", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
r_json = r.json()
if 'items' not in r_json[self.index]['mappings']:
return properties
if 'properties' not in r_json[self.index]['mappings']['items']:
return properties
properties = r_json[self.index]['mappings']['items']['properties']
except requests.exceptions.HTTPError as ex:
logger.error("Error all attributes for %s.", self.anonymize_url(self.index_url))
logger.error(ex)
return
return properties | [
"def",
"all_properties",
"(",
"self",
")",
":",
"properties",
"=",
"{",
"}",
"r",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"self",
".",
"index_url",
"+",
"\"/_mapping\"",
",",
"headers",
"=",
"HEADER_JSON",
",",
"verify",
"=",
"False",
")",
"try",
":",
"r",
".",
"raise_for_status",
"(",
")",
"r_json",
"=",
"r",
".",
"json",
"(",
")",
"if",
"'items'",
"not",
"in",
"r_json",
"[",
"self",
".",
"index",
"]",
"[",
"'mappings'",
"]",
":",
"return",
"properties",
"if",
"'properties'",
"not",
"in",
"r_json",
"[",
"self",
".",
"index",
"]",
"[",
"'mappings'",
"]",
"[",
"'items'",
"]",
":",
"return",
"properties",
"properties",
"=",
"r_json",
"[",
"self",
".",
"index",
"]",
"[",
"'mappings'",
"]",
"[",
"'items'",
"]",
"[",
"'properties'",
"]",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Error all attributes for %s.\"",
",",
"self",
".",
"anonymize_url",
"(",
"self",
".",
"index_url",
")",
")",
"logger",
".",
"error",
"(",
"ex",
")",
"return",
"return",
"properties"
]
| Get all properties of a given index | [
"Get",
"all",
"properties",
"of",
"a",
"given",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic.py#L507-L528 | train |
chaoss/grimoirelab-elk | grimoire_elk/utils.py | get_kibiter_version | def get_kibiter_version(url):
"""
Return the Kibiter major version number
The url must point to the Elasticsearch used by Kibiter
"""
config_url = '.kibana/config/_search'
# Avoid having // in the URL because ES will fail
if url[-1] != '/':
url += "/"
url += config_url
r = requests.get(url)
r.raise_for_status()
if len(r.json()['hits']['hits']) == 0:
logger.error("Can not get the Kibiter version")
return None
version = r.json()['hits']['hits'][0]['_id']
# 5.4.0-SNAPSHOT
major_version = version.split(".", 1)[0]
return major_version | python | def get_kibiter_version(url):
"""
Return the Kibiter major version number
The url must point to the Elasticsearch used by Kibiter
"""
config_url = '.kibana/config/_search'
# Avoid having // in the URL because ES will fail
if url[-1] != '/':
url += "/"
url += config_url
r = requests.get(url)
r.raise_for_status()
if len(r.json()['hits']['hits']) == 0:
logger.error("Can not get the Kibiter version")
return None
version = r.json()['hits']['hits'][0]['_id']
# 5.4.0-SNAPSHOT
major_version = version.split(".", 1)[0]
return major_version | [
"def",
"get_kibiter_version",
"(",
"url",
")",
":",
"config_url",
"=",
"'.kibana/config/_search'",
"# Avoid having // in the URL because ES will fail",
"if",
"url",
"[",
"-",
"1",
"]",
"!=",
"'/'",
":",
"url",
"+=",
"\"/\"",
"url",
"+=",
"config_url",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"r",
".",
"raise_for_status",
"(",
")",
"if",
"len",
"(",
"r",
".",
"json",
"(",
")",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
")",
"==",
"0",
":",
"logger",
".",
"error",
"(",
"\"Can not get the Kibiter version\"",
")",
"return",
"None",
"version",
"=",
"r",
".",
"json",
"(",
")",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
"[",
"0",
"]",
"[",
"'_id'",
"]",
"# 5.4.0-SNAPSHOT",
"major_version",
"=",
"version",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"return",
"major_version"
]
 | Return the Kibiter major version number
The url must point to the Elasticsearch used by Kibiter | [
"Return",
"kibiter",
"major",
"number",
"version"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/utils.py#L262-L284 | train |
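Usage sketch, assuming grimoire_elk is installed and that Kibiter's Elasticsearch is reachable at the placeholder URL.

from grimoire_elk.utils import get_kibiter_version

major = get_kibiter_version("http://localhost:9200")
print(major)     # e.g. '6', or None if the .kibana config document is missing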
chaoss/grimoirelab-elk | grimoire_elk/utils.py | get_params | def get_params():
""" Get params definition from ElasticOcean and from all the backends """
parser = get_params_parser()
args = parser.parse_args()
if not args.enrich_only and not args.only_identities and not args.only_studies:
if not args.index:
# Check that the raw index name is defined
print("[error] --index <name> param is required when collecting items from raw")
sys.exit(1)
return args | python | def get_params():
""" Get params definition from ElasticOcean and from all the backends """
parser = get_params_parser()
args = parser.parse_args()
if not args.enrich_only and not args.only_identities and not args.only_studies:
if not args.index:
# Check that the raw index name is defined
print("[error] --index <name> param is required when collecting items from raw")
sys.exit(1)
return args | [
"def",
"get_params",
"(",
")",
":",
"parser",
"=",
"get_params_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"args",
".",
"enrich_only",
"and",
"not",
"args",
".",
"only_identities",
"and",
"not",
"args",
".",
"only_studies",
":",
"if",
"not",
"args",
".",
"index",
":",
"# Check that the raw index name is defined",
"print",
"(",
"\"[error] --index <name> param is required when collecting items from raw\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"args"
]
| Get params definition from ElasticOcean and from all the backends | [
"Get",
"params",
"definition",
"from",
"ElasticOcean",
"and",
"from",
"all",
"the",
"backends"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/utils.py#L376-L388 | train |
chaoss/grimoirelab-elk | grimoire_elk/utils.py | get_time_diff_days | def get_time_diff_days(start_txt, end_txt):
''' Number of days between two days '''
if start_txt is None or end_txt is None:
return None
start = parser.parse(start_txt)
end = parser.parse(end_txt)
seconds_day = float(60 * 60 * 24)
diff_days = \
(end - start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days | python | def get_time_diff_days(start_txt, end_txt):
''' Number of days between two days '''
if start_txt is None or end_txt is None:
return None
start = parser.parse(start_txt)
end = parser.parse(end_txt)
seconds_day = float(60 * 60 * 24)
diff_days = \
(end - start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days | [
"def",
"get_time_diff_days",
"(",
"start_txt",
",",
"end_txt",
")",
":",
"if",
"start_txt",
"is",
"None",
"or",
"end_txt",
"is",
"None",
":",
"return",
"None",
"start",
"=",
"parser",
".",
"parse",
"(",
"start_txt",
")",
"end",
"=",
"parser",
".",
"parse",
"(",
"end_txt",
")",
"seconds_day",
"=",
"float",
"(",
"60",
"*",
"60",
"*",
"24",
")",
"diff_days",
"=",
"(",
"end",
"-",
"start",
")",
".",
"total_seconds",
"(",
")",
"/",
"seconds_day",
"diff_days",
"=",
"float",
"(",
"'%.2f'",
"%",
"diff_days",
")",
"return",
"diff_days"
]
| Number of days between two days | [
"Number",
"of",
"days",
"between",
"two",
"days"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/utils.py#L391-L405 | train |
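A quick check of the get_time_diff_days helper above (importable from grimoire_elk.utils; the timestamps are invented):

from grimoire_elk.utils import get_time_diff_days

# 36 hours apart -> 1.5 days, rounded to two decimals by the helper
print(get_time_diff_days("2018-01-01T00:00:00", "2018-01-02T12:00:00"))  # 1.5
# Either bound missing -> None
print(get_time_diff_days(None, "2018-01-02T12:00:00"))                   # None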
chaoss/grimoirelab-elk | grimoire_elk/enriched/jira.py | JiraEnrich.enrich_fields | def enrich_fields(cls, fields, eitem):
"""Enrich the fields property of an issue.
Loops through al properties in issue['fields'],
using those that are relevant to enrich eitem with new properties.
Those properties are user defined, depending on options
configured in Jira. For example, if SCRUM is activated,
we have a field named "Story Points".
:param fields: fields property of an issue
:param eitem: enriched item, which will be modified adding more properties
"""
for field in fields:
if field.startswith('customfield_'):
if type(fields[field]) is dict:
if 'name' in fields[field]:
if fields[field]['name'] == "Story Points":
eitem['story_points'] = fields[field]['value']
elif fields[field]['name'] == "Sprint":
value = fields[field]['value']
if value:
sprint = value[0].partition(",name=")[2].split(',')[0]
sprint_start = value[0].partition(",startDate=")[2].split(',')[0]
sprint_end = value[0].partition(",endDate=")[2].split(',')[0]
sprint_complete = value[0].partition(",completeDate=")[2].split(',')[0]
eitem['sprint'] = sprint
eitem['sprint_start'] = cls.fix_value_null(sprint_start)
eitem['sprint_end'] = cls.fix_value_null(sprint_end)
eitem['sprint_complete'] = cls.fix_value_null(sprint_complete) | python | def enrich_fields(cls, fields, eitem):
"""Enrich the fields property of an issue.
Loops through al properties in issue['fields'],
using those that are relevant to enrich eitem with new properties.
Those properties are user defined, depending on options
configured in Jira. For example, if SCRUM is activated,
we have a field named "Story Points".
:param fields: fields property of an issue
:param eitem: enriched item, which will be modified adding more properties
"""
for field in fields:
if field.startswith('customfield_'):
if type(fields[field]) is dict:
if 'name' in fields[field]:
if fields[field]['name'] == "Story Points":
eitem['story_points'] = fields[field]['value']
elif fields[field]['name'] == "Sprint":
value = fields[field]['value']
if value:
sprint = value[0].partition(",name=")[2].split(',')[0]
sprint_start = value[0].partition(",startDate=")[2].split(',')[0]
sprint_end = value[0].partition(",endDate=")[2].split(',')[0]
sprint_complete = value[0].partition(",completeDate=")[2].split(',')[0]
eitem['sprint'] = sprint
eitem['sprint_start'] = cls.fix_value_null(sprint_start)
eitem['sprint_end'] = cls.fix_value_null(sprint_end)
eitem['sprint_complete'] = cls.fix_value_null(sprint_complete) | [
"def",
"enrich_fields",
"(",
"cls",
",",
"fields",
",",
"eitem",
")",
":",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
".",
"startswith",
"(",
"'customfield_'",
")",
":",
"if",
"type",
"(",
"fields",
"[",
"field",
"]",
")",
"is",
"dict",
":",
"if",
"'name'",
"in",
"fields",
"[",
"field",
"]",
":",
"if",
"fields",
"[",
"field",
"]",
"[",
"'name'",
"]",
"==",
"\"Story Points\"",
":",
"eitem",
"[",
"'story_points'",
"]",
"=",
"fields",
"[",
"field",
"]",
"[",
"'value'",
"]",
"elif",
"fields",
"[",
"field",
"]",
"[",
"'name'",
"]",
"==",
"\"Sprint\"",
":",
"value",
"=",
"fields",
"[",
"field",
"]",
"[",
"'value'",
"]",
"if",
"value",
":",
"sprint",
"=",
"value",
"[",
"0",
"]",
".",
"partition",
"(",
"\",name=\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"sprint_start",
"=",
"value",
"[",
"0",
"]",
".",
"partition",
"(",
"\",startDate=\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"sprint_end",
"=",
"value",
"[",
"0",
"]",
".",
"partition",
"(",
"\",endDate=\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"sprint_complete",
"=",
"value",
"[",
"0",
"]",
".",
"partition",
"(",
"\",completeDate=\"",
")",
"[",
"2",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"eitem",
"[",
"'sprint'",
"]",
"=",
"sprint",
"eitem",
"[",
"'sprint_start'",
"]",
"=",
"cls",
".",
"fix_value_null",
"(",
"sprint_start",
")",
"eitem",
"[",
"'sprint_end'",
"]",
"=",
"cls",
".",
"fix_value_null",
"(",
"sprint_end",
")",
"eitem",
"[",
"'sprint_complete'",
"]",
"=",
"cls",
".",
"fix_value_null",
"(",
"sprint_complete",
")"
]
| Enrich the fields property of an issue.
Loops through al properties in issue['fields'],
using those that are relevant to enrich eitem with new properties.
Those properties are user defined, depending on options
configured in Jira. For example, if SCRUM is activated,
we have a field named "Story Points".
:param fields: fields property of an issue
:param eitem: enriched item, which will be modified adding more properties | [
"Enrich",
"the",
"fields",
"property",
"of",
"an",
"issue",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/jira.py#L197-L226 | train |
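The Sprint handling in enrich_fields above leans on Jira's serialized sprint string; the same partition/split steps, sketched standalone on an invented value:

# Invented example of the serialized Sprint custom-field value returned by Jira
value = ("com.atlassian.greenhopper.service.sprint.Sprint@1a2b[id=7,state=CLOSED,"
         "name=Sprint 7,startDate=2018-01-01T09:00:00.000Z,"
         "endDate=2018-01-15T09:00:00.000Z,completeDate=2018-01-15T10:00:00.000Z]")

sprint = value.partition(",name=")[2].split(',')[0]
sprint_start = value.partition(",startDate=")[2].split(',')[0]
print(sprint)        # Sprint 7
print(sprint_start)  # 2018-01-01T09:00:00.000Z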
chaoss/grimoirelab-elk | grimoire_elk/enriched/mediawiki.py | MediaWikiEnrich.get_review_sh | def get_review_sh(self, revision, item):
""" Add sorting hat enrichment fields for the author of the revision """
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
return erevision | python | def get_review_sh(self, revision, item):
""" Add sorting hat enrichment fields for the author of the revision """
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
return erevision | [
"def",
"get_review_sh",
"(",
"self",
",",
"revision",
",",
"item",
")",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"revision",
")",
"update",
"=",
"parser",
".",
"parse",
"(",
"item",
"[",
"self",
".",
"get_field_date",
"(",
")",
"]",
")",
"erevision",
"=",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"update",
")",
"return",
"erevision"
]
| Add sorting hat enrichment fields for the author of the revision | [
"Add",
"sorting",
"hat",
"enrichment",
"fields",
"for",
"the",
"author",
"of",
"the",
"revision"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/mediawiki.py#L109-L116 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/github.py | GitHubEnrich.get_github_cache | def get_github_cache(self, kind, key_):
""" Get cache data for items of _type using key_ as the cache dict key """
cache = {}
res_size = 100 # best size?
from_ = 0
index_github = "github/" + kind
url = self.elastic.url + "/" + index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logger.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[key_]] = item
from_ += res_size
r = self.requests.get(url + "&from=%i" % from_)
type_items = r.json()
if 'hits' not in type_items:
break
return cache | python | def get_github_cache(self, kind, key_):
""" Get cache data for items of _type using key_ as the cache dict key """
cache = {}
res_size = 100 # best size?
from_ = 0
index_github = "github/" + kind
url = self.elastic.url + "/" + index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logger.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[key_]] = item
from_ += res_size
r = self.requests.get(url + "&from=%i" % from_)
type_items = r.json()
if 'hits' not in type_items:
break
return cache | [
"def",
"get_github_cache",
"(",
"self",
",",
"kind",
",",
"key_",
")",
":",
"cache",
"=",
"{",
"}",
"res_size",
"=",
"100",
"# best size?",
"from_",
"=",
"0",
"index_github",
"=",
"\"github/\"",
"+",
"kind",
"url",
"=",
"self",
".",
"elastic",
".",
"url",
"+",
"\"/\"",
"+",
"index_github",
"url",
"+=",
"\"/_search\"",
"+",
"\"?\"",
"+",
"\"size=%i\"",
"%",
"res_size",
"r",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"url",
")",
"type_items",
"=",
"r",
".",
"json",
"(",
")",
"if",
"'hits'",
"not",
"in",
"type_items",
":",
"logger",
".",
"info",
"(",
"\"No github %s data in ES\"",
"%",
"(",
"kind",
")",
")",
"else",
":",
"while",
"len",
"(",
"type_items",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
")",
">",
"0",
":",
"for",
"hit",
"in",
"type_items",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
":",
"item",
"=",
"hit",
"[",
"'_source'",
"]",
"cache",
"[",
"item",
"[",
"key_",
"]",
"]",
"=",
"item",
"from_",
"+=",
"res_size",
"r",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"url",
"+",
"\"&from=%i\"",
"%",
"from_",
")",
"type_items",
"=",
"r",
".",
"json",
"(",
")",
"if",
"'hits'",
"not",
"in",
"type_items",
":",
"break",
"return",
"cache"
]
| Get cache data for items of _type using key_ as the cache dict key | [
"Get",
"cache",
"data",
"for",
"items",
"of",
"_type",
"using",
"key_",
"as",
"the",
"cache",
"dict",
"key"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/github.py#L194-L222 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/github.py | GitHubEnrich.get_time_to_first_attention | def get_time_to_first_attention(self, item):
"""Get the first date at which a comment or reaction was made to the issue by someone
other than the user who created the issue
"""
comment_dates = [str_to_datetime(comment['created_at']) for comment in item['comments_data']
if item['user']['login'] != comment['user']['login']]
reaction_dates = [str_to_datetime(reaction['created_at']) for reaction in item['reactions_data']
if item['user']['login'] != reaction['user']['login']]
reaction_dates.extend(comment_dates)
if reaction_dates:
return min(reaction_dates)
return None | python | def get_time_to_first_attention(self, item):
"""Get the first date at which a comment or reaction was made to the issue by someone
other than the user who created the issue
"""
comment_dates = [str_to_datetime(comment['created_at']) for comment in item['comments_data']
if item['user']['login'] != comment['user']['login']]
reaction_dates = [str_to_datetime(reaction['created_at']) for reaction in item['reactions_data']
if item['user']['login'] != reaction['user']['login']]
reaction_dates.extend(comment_dates)
if reaction_dates:
return min(reaction_dates)
return None | [
"def",
"get_time_to_first_attention",
"(",
"self",
",",
"item",
")",
":",
"comment_dates",
"=",
"[",
"str_to_datetime",
"(",
"comment",
"[",
"'created_at'",
"]",
")",
"for",
"comment",
"in",
"item",
"[",
"'comments_data'",
"]",
"if",
"item",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"!=",
"comment",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"]",
"reaction_dates",
"=",
"[",
"str_to_datetime",
"(",
"reaction",
"[",
"'created_at'",
"]",
")",
"for",
"reaction",
"in",
"item",
"[",
"'reactions_data'",
"]",
"if",
"item",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"!=",
"reaction",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"]",
"reaction_dates",
".",
"extend",
"(",
"comment_dates",
")",
"if",
"reaction_dates",
":",
"return",
"min",
"(",
"reaction_dates",
")",
"return",
"None"
]
| Get the first date at which a comment or reaction was made to the issue by someone
other than the user who created the issue | [
"Get",
"the",
"first",
"date",
"at",
"which",
"a",
"comment",
"or",
"reaction",
"was",
"made",
"to",
"the",
"issue",
"by",
"someone",
"other",
"than",
"the",
"user",
"who",
"created",
"the",
"issue"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/github.py#L267-L278 | train |
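The "earliest activity by someone other than the author" computation above, sketched standalone with dateutil and an invented issue payload:

from dateutil import parser

item = {
    'user': {'login': 'alice'},
    'comments_data': [
        {'user': {'login': 'alice'}, 'created_at': '2018-03-02T09:00:00Z'},
        {'user': {'login': 'bob'}, 'created_at': '2018-03-03T11:00:00Z'},
    ],
    'reactions_data': [
        {'user': {'login': 'carol'}, 'created_at': '2018-03-01T18:00:00Z'},
    ],
}

dates = [parser.parse(entry['created_at'])
         for entry in item['comments_data'] + item['reactions_data']
         if entry['user']['login'] != item['user']['login']]
print(min(dates))  # carol's reaction on 2018-03-01 is the first attention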
chaoss/grimoirelab-elk | grimoire_elk/enriched/github.py | GitHubEnrich.get_time_to_merge_request_response | def get_time_to_merge_request_response(self, item):
"""Get the first date at which a review was made on the PR by someone
other than the user who created the PR
"""
review_dates = [str_to_datetime(review['created_at']) for review in item['review_comments_data']
if item['user']['login'] != review['user']['login']]
if review_dates:
return min(review_dates)
return None | python | def get_time_to_merge_request_response(self, item):
"""Get the first date at which a review was made on the PR by someone
other than the user who created the PR
"""
review_dates = [str_to_datetime(review['created_at']) for review in item['review_comments_data']
if item['user']['login'] != review['user']['login']]
if review_dates:
return min(review_dates)
return None | [
"def",
"get_time_to_merge_request_response",
"(",
"self",
",",
"item",
")",
":",
"review_dates",
"=",
"[",
"str_to_datetime",
"(",
"review",
"[",
"'created_at'",
"]",
")",
"for",
"review",
"in",
"item",
"[",
"'review_comments_data'",
"]",
"if",
"item",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"!=",
"review",
"[",
"'user'",
"]",
"[",
"'login'",
"]",
"]",
"if",
"review_dates",
":",
"return",
"min",
"(",
"review_dates",
")",
"return",
"None"
]
| Get the first date at which a review was made on the PR by someone
other than the user who created the PR | [
"Get",
"the",
"first",
"date",
"at",
"which",
"a",
"review",
"was",
"made",
"on",
"the",
"PR",
"by",
"someone",
"other",
"than",
"the",
"user",
"who",
"created",
"the",
"PR"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/github.py#L280-L288 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/crates.py | CratesEnrich.get_rich_events | def get_rich_events(self, item):
"""
In the events there are some common fields with the crate. The name
of the field must be the same in the create and in the downloads event
so we can filer using it in crate and event at the same time.
* Fields that don't change: the field does not change with the events
in a create so the value is always the same in the events of a create.
* Fields that change: the value of the field changes with events
"""
if "version_downloads_data" not in item['data']:
return []
# To get values from the task
eitem = self.get_rich_item(item)
for sample in item['data']["version_downloads_data"]["version_downloads"]:
event = deepcopy(eitem)
event['download_sample_id'] = sample['id']
event['sample_date'] = sample['date']
sample_date = parser.parse(event['sample_date'])
event['sample_version'] = sample['version']
event['sample_downloads'] = sample['downloads']
event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event"))
yield event | python | def get_rich_events(self, item):
"""
In the events there are some common fields with the crate. The name
of the field must be the same in the create and in the downloads event
so we can filer using it in crate and event at the same time.
* Fields that don't change: the field does not change with the events
in a create so the value is always the same in the events of a create.
* Fields that change: the value of the field changes with events
"""
if "version_downloads_data" not in item['data']:
return []
# To get values from the task
eitem = self.get_rich_item(item)
for sample in item['data']["version_downloads_data"]["version_downloads"]:
event = deepcopy(eitem)
event['download_sample_id'] = sample['id']
event['sample_date'] = sample['date']
sample_date = parser.parse(event['sample_date'])
event['sample_version'] = sample['version']
event['sample_downloads'] = sample['downloads']
event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event"))
yield event | [
"def",
"get_rich_events",
"(",
"self",
",",
"item",
")",
":",
"if",
"\"version_downloads_data\"",
"not",
"in",
"item",
"[",
"'data'",
"]",
":",
"return",
"[",
"]",
"# To get values from the task",
"eitem",
"=",
"self",
".",
"get_rich_item",
"(",
"item",
")",
"for",
"sample",
"in",
"item",
"[",
"'data'",
"]",
"[",
"\"version_downloads_data\"",
"]",
"[",
"\"version_downloads\"",
"]",
":",
"event",
"=",
"deepcopy",
"(",
"eitem",
")",
"event",
"[",
"'download_sample_id'",
"]",
"=",
"sample",
"[",
"'id'",
"]",
"event",
"[",
"'sample_date'",
"]",
"=",
"sample",
"[",
"'date'",
"]",
"sample_date",
"=",
"parser",
".",
"parse",
"(",
"event",
"[",
"'sample_date'",
"]",
")",
"event",
"[",
"'sample_version'",
"]",
"=",
"sample",
"[",
"'version'",
"]",
"event",
"[",
"'sample_downloads'",
"]",
"=",
"sample",
"[",
"'downloads'",
"]",
"event",
".",
"update",
"(",
"self",
".",
"get_grimoire_fields",
"(",
"sample_date",
".",
"isoformat",
"(",
")",
",",
"\"downloads_event\"",
")",
")",
"yield",
"event"
]
| In the events there are some common fields with the crate. The name
of the field must be the same in the create and in the downloads event
so we can filer using it in crate and event at the same time.
* Fields that don't change: the field does not change with the events
in a create so the value is always the same in the events of a create.
* Fields that change: the value of the field changes with events | [
"In",
"the",
"events",
"there",
"are",
"some",
"common",
"fields",
"with",
"the",
"crate",
".",
"The",
"name",
"of",
"the",
"field",
"must",
"be",
"the",
"same",
"in",
"the",
"create",
"and",
"in",
"the",
"downloads",
"event",
"so",
"we",
"can",
"filer",
"using",
"it",
"in",
"crate",
"and",
"event",
"at",
"the",
"same",
"time",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/crates.py#L99-L125 | train |
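The one-event-per-download-sample pattern used by get_rich_events, sketched without the enricher (crate and samples invented): unchanged crate-level fields are cloned onto every event, while the sample fields vary.

from copy import deepcopy

eitem = {'crate': 'serde', 'origin': 'https://crates.io/api/v1/crates/serde'}
samples = [{'id': 1, 'date': '2018-01-01', 'version': 10, 'downloads': 42},
           {'id': 2, 'date': '2018-01-02', 'version': 10, 'downloads': 57}]

events = []
for sample in samples:
    event = deepcopy(eitem)                # fields that don't change
    event['sample_date'] = sample['date']  # fields that change per sample
    event['sample_downloads'] = sample['downloads']
    events.append(event)
print(len(events), events[1]['crate'], events[1]['sample_downloads'])  # 2 serde 57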
chaoss/grimoirelab-elk | grimoire_elk/enriched/twitter.py | TwitterEnrich.get_item_project | def get_item_project(self, eitem):
""" Get project mapping enrichment field.
Twitter mappings is pretty special so it needs a special
implementacion.
"""
project = None
eitem_project = {}
ds_name = self.get_connector_name() # data source name in projects map
if ds_name not in self.prjs_map:
return eitem_project
for tag in eitem['hashtags_analyzed']:
# lcanas: hashtag provided in projects.json file should not be case sensitive T6876
tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
if tag in tags2project:
project = tags2project[tag]
break
if project is None:
project = DEFAULT_PROJECT
eitem_project = {"project": project}
eitem_project.update(self.add_project_levels(project))
return eitem_project | python | def get_item_project(self, eitem):
""" Get project mapping enrichment field.
Twitter mappings is pretty special so it needs a special
implementacion.
"""
project = None
eitem_project = {}
ds_name = self.get_connector_name() # data source name in projects map
if ds_name not in self.prjs_map:
return eitem_project
for tag in eitem['hashtags_analyzed']:
# lcanas: hashtag provided in projects.json file should not be case sensitive T6876
tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
if tag in tags2project:
project = tags2project[tag]
break
if project is None:
project = DEFAULT_PROJECT
eitem_project = {"project": project}
eitem_project.update(self.add_project_levels(project))
return eitem_project | [
"def",
"get_item_project",
"(",
"self",
",",
"eitem",
")",
":",
"project",
"=",
"None",
"eitem_project",
"=",
"{",
"}",
"ds_name",
"=",
"self",
".",
"get_connector_name",
"(",
")",
"# data source name in projects map",
"if",
"ds_name",
"not",
"in",
"self",
".",
"prjs_map",
":",
"return",
"eitem_project",
"for",
"tag",
"in",
"eitem",
"[",
"'hashtags_analyzed'",
"]",
":",
"# lcanas: hashtag provided in projects.json file should not be case sensitive T6876",
"tags2project",
"=",
"CaseInsensitiveDict",
"(",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
")",
"if",
"tag",
"in",
"tags2project",
":",
"project",
"=",
"tags2project",
"[",
"tag",
"]",
"break",
"if",
"project",
"is",
"None",
":",
"project",
"=",
"DEFAULT_PROJECT",
"eitem_project",
"=",
"{",
"\"project\"",
":",
"project",
"}",
"eitem_project",
".",
"update",
"(",
"self",
".",
"add_project_levels",
"(",
"project",
")",
")",
"return",
"eitem_project"
]
| Get project mapping enrichment field.
Twitter mappings is pretty special so it needs a special
implementacion. | [
"Get",
"project",
"mapping",
"enrichment",
"field",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/twitter.py#L93-L121 | train |
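A standalone sketch of the case-insensitive hashtag-to-project lookup, using requests' CaseInsensitiveDict and an invented projects map:

from requests.structures import CaseInsensitiveDict

prjs_map = {'twitter': {'GrimoireLab': 'grimoirelab'}}   # invented mapping
tags2project = CaseInsensitiveDict(prjs_map['twitter'])

for tag in ['grimoirelab', 'GRIMOIRELAB', 'unrelated']:
    # 'Main' stands in here for the DEFAULT_PROJECT fallback
    project = tags2project[tag] if tag in tags2project else 'Main'
    print(tag, '->', project)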
chaoss/grimoirelab-elk | grimoire_elk/enriched/jenkins.py | JenkinsEnrich.get_fields_from_job_name | def get_fields_from_job_name(self, job_name):
"""Analyze a Jenkins job name, producing a dictionary
The produced dictionary will include information about the category
and subcategory of the job name, and any extra information which
could be useful.
For each deployment of a Jenkins dashboard, an implementation of
this function should be produced, according to the needs of the users.
:param job: job name to Analyze
:returns: dictionary with categorization information
"""
extra_fields = {
'category': None,
'installer': None,
'scenario': None,
'testproject': None,
'pod': None,
'loop': None,
'branch': None
}
try:
components = job_name.split('-')
if len(components) < 2:
return extra_fields
kind = components[1]
if kind == 'os':
extra_fields['category'] = 'parent/main'
extra_fields['installer'] = components[0]
extra_fields['scenario'] = '-'.join(components[2:-3])
elif kind == 'deploy':
extra_fields['category'] = 'deploy'
extra_fields['installer'] = components[0]
else:
extra_fields['category'] = 'test'
extra_fields['testproject'] = components[0]
extra_fields['installer'] = components[1]
extra_fields['pod'] = components[-3]
extra_fields['loop'] = components[-2]
extra_fields['branch'] = components[-1]
except IndexError as ex:
# Just DEBUG level because it is just for OPNFV
logger.debug('Problems parsing job name %s', job_name)
logger.debug(ex)
return extra_fields | python | def get_fields_from_job_name(self, job_name):
"""Analyze a Jenkins job name, producing a dictionary
The produced dictionary will include information about the category
and subcategory of the job name, and any extra information which
could be useful.
For each deployment of a Jenkins dashboard, an implementation of
this function should be produced, according to the needs of the users.
:param job: job name to Analyze
:returns: dictionary with categorization information
"""
extra_fields = {
'category': None,
'installer': None,
'scenario': None,
'testproject': None,
'pod': None,
'loop': None,
'branch': None
}
try:
components = job_name.split('-')
if len(components) < 2:
return extra_fields
kind = components[1]
if kind == 'os':
extra_fields['category'] = 'parent/main'
extra_fields['installer'] = components[0]
extra_fields['scenario'] = '-'.join(components[2:-3])
elif kind == 'deploy':
extra_fields['category'] = 'deploy'
extra_fields['installer'] = components[0]
else:
extra_fields['category'] = 'test'
extra_fields['testproject'] = components[0]
extra_fields['installer'] = components[1]
extra_fields['pod'] = components[-3]
extra_fields['loop'] = components[-2]
extra_fields['branch'] = components[-1]
except IndexError as ex:
# Just DEBUG level because it is just for OPNFV
logger.debug('Problems parsing job name %s', job_name)
logger.debug(ex)
return extra_fields | [
"def",
"get_fields_from_job_name",
"(",
"self",
",",
"job_name",
")",
":",
"extra_fields",
"=",
"{",
"'category'",
":",
"None",
",",
"'installer'",
":",
"None",
",",
"'scenario'",
":",
"None",
",",
"'testproject'",
":",
"None",
",",
"'pod'",
":",
"None",
",",
"'loop'",
":",
"None",
",",
"'branch'",
":",
"None",
"}",
"try",
":",
"components",
"=",
"job_name",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"components",
")",
"<",
"2",
":",
"return",
"extra_fields",
"kind",
"=",
"components",
"[",
"1",
"]",
"if",
"kind",
"==",
"'os'",
":",
"extra_fields",
"[",
"'category'",
"]",
"=",
"'parent/main'",
"extra_fields",
"[",
"'installer'",
"]",
"=",
"components",
"[",
"0",
"]",
"extra_fields",
"[",
"'scenario'",
"]",
"=",
"'-'",
".",
"join",
"(",
"components",
"[",
"2",
":",
"-",
"3",
"]",
")",
"elif",
"kind",
"==",
"'deploy'",
":",
"extra_fields",
"[",
"'category'",
"]",
"=",
"'deploy'",
"extra_fields",
"[",
"'installer'",
"]",
"=",
"components",
"[",
"0",
"]",
"else",
":",
"extra_fields",
"[",
"'category'",
"]",
"=",
"'test'",
"extra_fields",
"[",
"'testproject'",
"]",
"=",
"components",
"[",
"0",
"]",
"extra_fields",
"[",
"'installer'",
"]",
"=",
"components",
"[",
"1",
"]",
"extra_fields",
"[",
"'pod'",
"]",
"=",
"components",
"[",
"-",
"3",
"]",
"extra_fields",
"[",
"'loop'",
"]",
"=",
"components",
"[",
"-",
"2",
"]",
"extra_fields",
"[",
"'branch'",
"]",
"=",
"components",
"[",
"-",
"1",
"]",
"except",
"IndexError",
"as",
"ex",
":",
"# Just DEBUG level because it is just for OPNFV",
"logger",
".",
"debug",
"(",
"'Problems parsing job name %s'",
",",
"job_name",
")",
"logger",
".",
"debug",
"(",
"ex",
")",
"return",
"extra_fields"
]
| Analyze a Jenkins job name, producing a dictionary
The produced dictionary will include information about the category
and subcategory of the job name, and any extra information which
could be useful.
For each deployment of a Jenkins dashboard, an implementation of
this function should be produced, according to the needs of the users.
:param job: job name to Analyze
:returns: dictionary with categorization information | [
"Analyze",
"a",
"Jenkins",
"job",
"name",
"producing",
"a",
"dictionary"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/jenkins.py#L122-L174 | train |
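For an invented OPNFV-style job name, the split-based categorization in get_fields_from_job_name works out as follows (a standalone trace of the same logic):

job_name = 'functest-apex-baremetal-daily-master'   # invented job name
components = job_name.split('-')                    # second part is neither 'os' nor 'deploy'

fields = {
    'category': 'test',
    'testproject': components[0],   # functest
    'installer': components[1],     # apex
    'pod': components[-3],          # baremetal
    'loop': components[-2],         # daily
    'branch': components[-1],       # master
}
print(fields)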
chaoss/grimoirelab-elk | grimoire_elk/enriched/jenkins.py | JenkinsEnrich.extract_builton | def extract_builton(self, built_on, regex):
"""Extracts node name using a regular expression. Node name is expected to
be group 1.
"""
pattern = re.compile(regex, re.M | re.I)
match = pattern.search(built_on)
if match and len(match.groups()) >= 1:
node_name = match.group(1)
else:
msg = "Node name not extracted, using builtOn as it is: " + regex + ":" + built_on
logger.warning(msg)
node_name = built_on
return node_name | python | def extract_builton(self, built_on, regex):
"""Extracts node name using a regular expression. Node name is expected to
be group 1.
"""
pattern = re.compile(regex, re.M | re.I)
match = pattern.search(built_on)
if match and len(match.groups()) >= 1:
node_name = match.group(1)
else:
msg = "Node name not extracted, using builtOn as it is: " + regex + ":" + built_on
logger.warning(msg)
node_name = built_on
return node_name | [
"def",
"extract_builton",
"(",
"self",
",",
"built_on",
",",
"regex",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
",",
"re",
".",
"M",
"|",
"re",
".",
"I",
")",
"match",
"=",
"pattern",
".",
"search",
"(",
"built_on",
")",
"if",
"match",
"and",
"len",
"(",
"match",
".",
"groups",
"(",
")",
")",
">=",
"1",
":",
"node_name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"else",
":",
"msg",
"=",
"\"Node name not extracted, using builtOn as it is: \"",
"+",
"regex",
"+",
"\":\"",
"+",
"built_on",
"logger",
".",
"warning",
"(",
"msg",
")",
"node_name",
"=",
"built_on",
"return",
"node_name"
]
| Extracts node name using a regular expression. Node name is expected to
be group 1. | [
"Extracts",
"node",
"name",
"using",
"a",
"regular",
"expression",
".",
"Node",
"name",
"is",
"expected",
"to",
"be",
"group",
"1",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/jenkins.py#L176-L189 | train |
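A small sketch of the node-name extraction above, with an assumed pattern whose first group captures the node:

import re

built_on = 'lf-build2-centos7'     # invented builtOn value
regex = r'(.*)-centos7'            # assumed pattern: group 1 is the node name
match = re.compile(regex, re.M | re.I).search(built_on)
node_name = match.group(1) if match and match.groups() else built_on
print(node_name)                   # lf-build2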
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | onion_study | def onion_study(in_conn, out_conn, data_source):
"""Build and index for onion from a given Git index.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:param data_source: name of the date source to generate onion from.
:return: number of documents written in ElasticSearch enriched index.
"""
onion = OnionStudy(in_connector=in_conn, out_connector=out_conn, data_source=data_source)
ndocs = onion.analyze()
return ndocs | python | def onion_study(in_conn, out_conn, data_source):
"""Build and index for onion from a given Git index.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:param data_source: name of the date source to generate onion from.
:return: number of documents written in ElasticSearch enriched index.
"""
onion = OnionStudy(in_connector=in_conn, out_connector=out_conn, data_source=data_source)
ndocs = onion.analyze()
return ndocs | [
"def",
"onion_study",
"(",
"in_conn",
",",
"out_conn",
",",
"data_source",
")",
":",
"onion",
"=",
"OnionStudy",
"(",
"in_connector",
"=",
"in_conn",
",",
"out_connector",
"=",
"out_conn",
",",
"data_source",
"=",
"data_source",
")",
"ndocs",
"=",
"onion",
".",
"analyze",
"(",
")",
"return",
"ndocs"
]
| Build and index for onion from a given Git index.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:param data_source: name of the date source to generate onion from.
:return: number of documents written in ElasticSearch enriched index. | [
"Build",
"and",
"index",
"for",
"onion",
"from",
"a",
"given",
"Git",
"index",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L371-L381 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | ESOnionConnector.read_block | def read_block(self, size=None, from_date=None):
"""Read author commits by Quarter, Org and Project.
:param from_date: not used here. Incremental mode not supported yet.
:param size: not used here.
:return: DataFrame with commit count per author, split by quarter, org and project.
"""
# Get quarters corresponding to All items (Incremental mode NOT SUPPORTED)
quarters = self.__quarters()
for quarter in quarters:
logger.info(self.__log_prefix + " Quarter: " + str(quarter))
date_range = {self._timeframe_field: {'gte': quarter.start_time, 'lte': quarter.end_time}}
orgs = self.__list_uniques(date_range, self.AUTHOR_ORG)
projects = self.__list_uniques(date_range, self.PROJECT)
# Get global data
s = self.__build_search(date_range)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing).copy()
# Get global data by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Org: " + org_name)
s = self.__build_search(date_range, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, org_name=org_name).copy()
# Get project specific data
for project in projects:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project)
# Global project
s = self.__build_search(date_range, project_name=project)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project).copy()
# Split by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project + " Org: " + org_name)
s = self.__build_search(date_range, project_name=project, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project, org_name=org_name).copy() | python | def read_block(self, size=None, from_date=None):
"""Read author commits by Quarter, Org and Project.
:param from_date: not used here. Incremental mode not supported yet.
:param size: not used here.
:return: DataFrame with commit count per author, split by quarter, org and project.
"""
# Get quarters corresponding to All items (Incremental mode NOT SUPPORTED)
quarters = self.__quarters()
for quarter in quarters:
logger.info(self.__log_prefix + " Quarter: " + str(quarter))
date_range = {self._timeframe_field: {'gte': quarter.start_time, 'lte': quarter.end_time}}
orgs = self.__list_uniques(date_range, self.AUTHOR_ORG)
projects = self.__list_uniques(date_range, self.PROJECT)
# Get global data
s = self.__build_search(date_range)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing).copy()
# Get global data by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Org: " + org_name)
s = self.__build_search(date_range, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, org_name=org_name).copy()
# Get project specific data
for project in projects:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project)
# Global project
s = self.__build_search(date_range, project_name=project)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project).copy()
# Split by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project + " Org: " + org_name)
s = self.__build_search(date_range, project_name=project, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project, org_name=org_name).copy() | [
"def",
"read_block",
"(",
"self",
",",
"size",
"=",
"None",
",",
"from_date",
"=",
"None",
")",
":",
"# Get quarters corresponding to All items (Incremental mode NOT SUPPORTED)",
"quarters",
"=",
"self",
".",
"__quarters",
"(",
")",
"for",
"quarter",
"in",
"quarters",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Quarter: \"",
"+",
"str",
"(",
"quarter",
")",
")",
"date_range",
"=",
"{",
"self",
".",
"_timeframe_field",
":",
"{",
"'gte'",
":",
"quarter",
".",
"start_time",
",",
"'lte'",
":",
"quarter",
".",
"end_time",
"}",
"}",
"orgs",
"=",
"self",
".",
"__list_uniques",
"(",
"date_range",
",",
"self",
".",
"AUTHOR_ORG",
")",
"projects",
"=",
"self",
".",
"__list_uniques",
"(",
"date_range",
",",
"self",
".",
"PROJECT",
")",
"# Get global data",
"s",
"=",
"self",
".",
"__build_search",
"(",
"date_range",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"for",
"timing",
"in",
"response",
".",
"aggregations",
"[",
"self",
".",
"TIMEFRAME",
"]",
".",
"buckets",
":",
"yield",
"self",
".",
"__build_dataframe",
"(",
"timing",
")",
".",
"copy",
"(",
")",
"# Get global data by Org",
"for",
"org_name",
"in",
"orgs",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Quarter: \"",
"+",
"str",
"(",
"quarter",
")",
"+",
"\" Org: \"",
"+",
"org_name",
")",
"s",
"=",
"self",
".",
"__build_search",
"(",
"date_range",
",",
"org_name",
"=",
"org_name",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"for",
"timing",
"in",
"response",
".",
"aggregations",
"[",
"self",
".",
"TIMEFRAME",
"]",
".",
"buckets",
":",
"yield",
"self",
".",
"__build_dataframe",
"(",
"timing",
",",
"org_name",
"=",
"org_name",
")",
".",
"copy",
"(",
")",
"# Get project specific data",
"for",
"project",
"in",
"projects",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Quarter: \"",
"+",
"str",
"(",
"quarter",
")",
"+",
"\" Project: \"",
"+",
"project",
")",
"# Global project",
"s",
"=",
"self",
".",
"__build_search",
"(",
"date_range",
",",
"project_name",
"=",
"project",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"for",
"timing",
"in",
"response",
".",
"aggregations",
"[",
"self",
".",
"TIMEFRAME",
"]",
".",
"buckets",
":",
"yield",
"self",
".",
"__build_dataframe",
"(",
"timing",
",",
"project_name",
"=",
"project",
")",
".",
"copy",
"(",
")",
"# Split by Org",
"for",
"org_name",
"in",
"orgs",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Quarter: \"",
"+",
"str",
"(",
"quarter",
")",
"+",
"\" Project: \"",
"+",
"project",
"+",
"\" Org: \"",
"+",
"org_name",
")",
"s",
"=",
"self",
".",
"__build_search",
"(",
"date_range",
",",
"project_name",
"=",
"project",
",",
"org_name",
"=",
"org_name",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"for",
"timing",
"in",
"response",
".",
"aggregations",
"[",
"self",
".",
"TIMEFRAME",
"]",
".",
"buckets",
":",
"yield",
"self",
".",
"__build_dataframe",
"(",
"timing",
",",
"project_name",
"=",
"project",
",",
"org_name",
"=",
"org_name",
")",
".",
"copy",
"(",
")"
]
| Read author commits by Quarter, Org and Project.
:param from_date: not used here. Incremental mode not supported yet.
:param size: not used here.
:return: DataFrame with commit count per author, split by quarter, org and project. | [
"Read",
"author",
"commits",
"by",
"Quarter",
"Org",
"and",
"Project",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L72-L131 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | ESOnionConnector.__quarters | def __quarters(self, from_date=None):
"""Get a set of quarters with available items from a given index date.
:param from_date:
:return: list of `pandas.Period` corresponding to quarters
"""
s = Search(using=self._es_conn, index=self._es_index)
if from_date:
# Work around to solve conversion problem of '__' to '.' in field name
q = Q('range')
q.__setattr__(self._sort_on_field, {'gte': from_date})
s = s.filter(q)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field,
interval='quarter', min_doc_count=1)
response = s.execute()
quarters = []
for quarter in response.aggregations[self.TIMEFRAME].buckets:
period = pandas.Period(quarter.key_as_string, 'Q')
quarters.append(period)
return quarters | python | def __quarters(self, from_date=None):
"""Get a set of quarters with available items from a given index date.
:param from_date:
:return: list of `pandas.Period` corresponding to quarters
"""
s = Search(using=self._es_conn, index=self._es_index)
if from_date:
# Work around to solve conversion problem of '__' to '.' in field name
q = Q('range')
q.__setattr__(self._sort_on_field, {'gte': from_date})
s = s.filter(q)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field,
interval='quarter', min_doc_count=1)
response = s.execute()
quarters = []
for quarter in response.aggregations[self.TIMEFRAME].buckets:
period = pandas.Period(quarter.key_as_string, 'Q')
quarters.append(period)
return quarters | [
"def",
"__quarters",
"(",
"self",
",",
"from_date",
"=",
"None",
")",
":",
"s",
"=",
"Search",
"(",
"using",
"=",
"self",
".",
"_es_conn",
",",
"index",
"=",
"self",
".",
"_es_index",
")",
"if",
"from_date",
":",
"# Work around to solve conversion problem of '__' to '.' in field name",
"q",
"=",
"Q",
"(",
"'range'",
")",
"q",
".",
"__setattr__",
"(",
"self",
".",
"_sort_on_field",
",",
"{",
"'gte'",
":",
"from_date",
"}",
")",
"s",
"=",
"s",
".",
"filter",
"(",
"q",
")",
"# from:to parameters (=> from: 0, size: 0)",
"s",
"=",
"s",
"[",
"0",
":",
"0",
"]",
"s",
".",
"aggs",
".",
"bucket",
"(",
"self",
".",
"TIMEFRAME",
",",
"'date_histogram'",
",",
"field",
"=",
"self",
".",
"_timeframe_field",
",",
"interval",
"=",
"'quarter'",
",",
"min_doc_count",
"=",
"1",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"quarters",
"=",
"[",
"]",
"for",
"quarter",
"in",
"response",
".",
"aggregations",
"[",
"self",
".",
"TIMEFRAME",
"]",
".",
"buckets",
":",
"period",
"=",
"pandas",
".",
"Period",
"(",
"quarter",
".",
"key_as_string",
",",
"'Q'",
")",
"quarters",
".",
"append",
"(",
"period",
")",
"return",
"quarters"
]
| Get a set of quarters with available items from a given index date.
:param from_date:
:return: list of `pandas.Period` corresponding to quarters | [
"Get",
"a",
"set",
"of",
"quarters",
"with",
"available",
"items",
"from",
"a",
"given",
"index",
"date",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L214-L239 | train |
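The quarter buckets above end up as pandas Period objects; for example:

import pandas

# A date_histogram bucket starting 2018-04-01 belongs to the second quarter of 2018
period = pandas.Period('2018-04-01', 'Q')
print(str(period))   # 2018Q2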
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | ESOnionConnector.__list_uniques | def __list_uniques(self, date_range, field_name):
"""Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values.
"""
# Get project list
s = Search(using=self._es_conn, index=self._es_index)
s = s.filter('range', **date_range)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
response = s.execute()
uniques_list = []
for item in response.aggregations.uniques.buckets:
uniques_list.append(item.key)
return uniques_list | python | def __list_uniques(self, date_range, field_name):
"""Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values.
"""
# Get project list
s = Search(using=self._es_conn, index=self._es_index)
s = s.filter('range', **date_range)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
response = s.execute()
uniques_list = []
for item in response.aggregations.uniques.buckets:
uniques_list.append(item.key)
return uniques_list | [
"def",
"__list_uniques",
"(",
"self",
",",
"date_range",
",",
"field_name",
")",
":",
"# Get project list",
"s",
"=",
"Search",
"(",
"using",
"=",
"self",
".",
"_es_conn",
",",
"index",
"=",
"self",
".",
"_es_index",
")",
"s",
"=",
"s",
".",
"filter",
"(",
"'range'",
",",
"*",
"*",
"date_range",
")",
"# from:to parameters (=> from: 0, size: 0)",
"s",
"=",
"s",
"[",
"0",
":",
"0",
"]",
"s",
".",
"aggs",
".",
"bucket",
"(",
"'uniques'",
",",
"'terms'",
",",
"field",
"=",
"field_name",
",",
"size",
"=",
"1000",
")",
"response",
"=",
"s",
".",
"execute",
"(",
")",
"uniques_list",
"=",
"[",
"]",
"for",
"item",
"in",
"response",
".",
"aggregations",
".",
"uniques",
".",
"buckets",
":",
"uniques_list",
".",
"append",
"(",
"item",
".",
"key",
")",
"return",
"uniques_list"
]
| Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values. | [
"Retrieve",
"a",
"list",
"of",
"unique",
"values",
"in",
"a",
"given",
"field",
"within",
"a",
"date",
"range",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L241-L259 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | ESOnionConnector.__build_dataframe | def __build_dataframe(self, timing, project_name=None, org_name=None):
"""Build a DataFrame from a time bucket.
:param timing:
:param project_name:
:param org_name:
:return:
"""
date_list = []
uuid_list = []
name_list = []
contribs_list = []
latest_ts_list = []
logger.debug(self.__log_prefix + " timing: " + timing.key_as_string)
for author in timing[self.AUTHOR_UUID].buckets:
latest_ts_list.append(timing[self.LATEST_TS].value_as_string)
date_list.append(timing.key_as_string)
uuid_list.append(author.key)
if author[self.AUTHOR_NAME] and author[self.AUTHOR_NAME].buckets \
and len(author[self.AUTHOR_NAME].buckets) > 0:
name_list.append(author[self.AUTHOR_NAME].buckets[0].key)
else:
name_list.append("Unknown")
contribs_list.append(author[self.CONTRIBUTIONS].value)
df = pandas.DataFrame()
df[self.TIMEFRAME] = date_list
df[self.AUTHOR_UUID] = uuid_list
df[self.AUTHOR_NAME] = name_list
df[self.CONTRIBUTIONS] = contribs_list
df[self.TIMESTAMP] = latest_ts_list
if not project_name:
project_name = "_Global_"
df[self.PROJECT] = project_name
if not org_name:
org_name = "_Global_"
df[self.AUTHOR_ORG] = org_name
return df | python | def __build_dataframe(self, timing, project_name=None, org_name=None):
"""Build a DataFrame from a time bucket.
:param timing:
:param project_name:
:param org_name:
:return:
"""
date_list = []
uuid_list = []
name_list = []
contribs_list = []
latest_ts_list = []
logger.debug(self.__log_prefix + " timing: " + timing.key_as_string)
for author in timing[self.AUTHOR_UUID].buckets:
latest_ts_list.append(timing[self.LATEST_TS].value_as_string)
date_list.append(timing.key_as_string)
uuid_list.append(author.key)
if author[self.AUTHOR_NAME] and author[self.AUTHOR_NAME].buckets \
and len(author[self.AUTHOR_NAME].buckets) > 0:
name_list.append(author[self.AUTHOR_NAME].buckets[0].key)
else:
name_list.append("Unknown")
contribs_list.append(author[self.CONTRIBUTIONS].value)
df = pandas.DataFrame()
df[self.TIMEFRAME] = date_list
df[self.AUTHOR_UUID] = uuid_list
df[self.AUTHOR_NAME] = name_list
df[self.CONTRIBUTIONS] = contribs_list
df[self.TIMESTAMP] = latest_ts_list
if not project_name:
project_name = "_Global_"
df[self.PROJECT] = project_name
if not org_name:
org_name = "_Global_"
df[self.AUTHOR_ORG] = org_name
return df | [
"def",
"__build_dataframe",
"(",
"self",
",",
"timing",
",",
"project_name",
"=",
"None",
",",
"org_name",
"=",
"None",
")",
":",
"date_list",
"=",
"[",
"]",
"uuid_list",
"=",
"[",
"]",
"name_list",
"=",
"[",
"]",
"contribs_list",
"=",
"[",
"]",
"latest_ts_list",
"=",
"[",
"]",
"logger",
".",
"debug",
"(",
"self",
".",
"__log_prefix",
"+",
"\" timing: \"",
"+",
"timing",
".",
"key_as_string",
")",
"for",
"author",
"in",
"timing",
"[",
"self",
".",
"AUTHOR_UUID",
"]",
".",
"buckets",
":",
"latest_ts_list",
".",
"append",
"(",
"timing",
"[",
"self",
".",
"LATEST_TS",
"]",
".",
"value_as_string",
")",
"date_list",
".",
"append",
"(",
"timing",
".",
"key_as_string",
")",
"uuid_list",
".",
"append",
"(",
"author",
".",
"key",
")",
"if",
"author",
"[",
"self",
".",
"AUTHOR_NAME",
"]",
"and",
"author",
"[",
"self",
".",
"AUTHOR_NAME",
"]",
".",
"buckets",
"and",
"len",
"(",
"author",
"[",
"self",
".",
"AUTHOR_NAME",
"]",
".",
"buckets",
")",
">",
"0",
":",
"name_list",
".",
"append",
"(",
"author",
"[",
"self",
".",
"AUTHOR_NAME",
"]",
".",
"buckets",
"[",
"0",
"]",
".",
"key",
")",
"else",
":",
"name_list",
".",
"append",
"(",
"\"Unknown\"",
")",
"contribs_list",
".",
"append",
"(",
"author",
"[",
"self",
".",
"CONTRIBUTIONS",
"]",
".",
"value",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
")",
"df",
"[",
"self",
".",
"TIMEFRAME",
"]",
"=",
"date_list",
"df",
"[",
"self",
".",
"AUTHOR_UUID",
"]",
"=",
"uuid_list",
"df",
"[",
"self",
".",
"AUTHOR_NAME",
"]",
"=",
"name_list",
"df",
"[",
"self",
".",
"CONTRIBUTIONS",
"]",
"=",
"contribs_list",
"df",
"[",
"self",
".",
"TIMESTAMP",
"]",
"=",
"latest_ts_list",
"if",
"not",
"project_name",
":",
"project_name",
"=",
"\"_Global_\"",
"df",
"[",
"self",
".",
"PROJECT",
"]",
"=",
"project_name",
"if",
"not",
"org_name",
":",
"org_name",
"=",
"\"_Global_\"",
"df",
"[",
"self",
".",
"AUTHOR_ORG",
"]",
"=",
"org_name",
"return",
"df"
]
| Build a DataFrame from a time bucket.
:param timing:
:param project_name:
:param org_name:
:return: | [
"Build",
"a",
"DataFrame",
"from",
"a",
"time",
"bucket",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L285-L326 | train |
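The column-by-column DataFrame assembly in __build_dataframe reduces to this pattern (column names and values invented; note how a scalar such as the project name is broadcast to every row):

import pandas

df = pandas.DataFrame()
df['author_uuid'] = ['u1', 'u2']
df['contributions'] = [10, 3]
df['project'] = '_Global_'       # scalar -> same value on every row
print(df.to_dict(orient='records'))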
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | OnionStudy.process | def process(self, items_block):
"""Process a DataFrame to compute Onion.
:param items_block: items to be processed. Expects to find a pandas DataFrame.
"""
logger.info(self.__log_prefix + " Authors to process: " + str(len(items_block)))
onion_enrich = Onion(items_block)
df_onion = onion_enrich.enrich(member_column=ESOnionConnector.AUTHOR_UUID,
events_column=ESOnionConnector.CONTRIBUTIONS)
# Get and store Quarter as String
df_onion['quarter'] = df_onion[ESOnionConnector.TIMEFRAME].map(lambda x: str(pandas.Period(x, 'Q')))
# Add metadata: enriched on timestamp
df_onion['metadata__enriched_on'] = datetime.utcnow().isoformat()
df_onion['data_source'] = self.data_source
df_onion['grimoire_creation_date'] = df_onion[ESOnionConnector.TIMEFRAME]
logger.info(self.__log_prefix + " Final new events: " + str(len(df_onion)))
return self.ProcessResults(processed=len(df_onion), out_items=df_onion) | python | def process(self, items_block):
"""Process a DataFrame to compute Onion.
:param items_block: items to be processed. Expects to find a pandas DataFrame.
"""
logger.info(self.__log_prefix + " Authors to process: " + str(len(items_block)))
onion_enrich = Onion(items_block)
df_onion = onion_enrich.enrich(member_column=ESOnionConnector.AUTHOR_UUID,
events_column=ESOnionConnector.CONTRIBUTIONS)
# Get and store Quarter as String
df_onion['quarter'] = df_onion[ESOnionConnector.TIMEFRAME].map(lambda x: str(pandas.Period(x, 'Q')))
# Add metadata: enriched on timestamp
df_onion['metadata__enriched_on'] = datetime.utcnow().isoformat()
df_onion['data_source'] = self.data_source
df_onion['grimoire_creation_date'] = df_onion[ESOnionConnector.TIMEFRAME]
logger.info(self.__log_prefix + " Final new events: " + str(len(df_onion)))
return self.ProcessResults(processed=len(df_onion), out_items=df_onion) | [
"def",
"process",
"(",
"self",
",",
"items_block",
")",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Authors to process: \"",
"+",
"str",
"(",
"len",
"(",
"items_block",
")",
")",
")",
"onion_enrich",
"=",
"Onion",
"(",
"items_block",
")",
"df_onion",
"=",
"onion_enrich",
".",
"enrich",
"(",
"member_column",
"=",
"ESOnionConnector",
".",
"AUTHOR_UUID",
",",
"events_column",
"=",
"ESOnionConnector",
".",
"CONTRIBUTIONS",
")",
"# Get and store Quarter as String",
"df_onion",
"[",
"'quarter'",
"]",
"=",
"df_onion",
"[",
"ESOnionConnector",
".",
"TIMEFRAME",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"str",
"(",
"pandas",
".",
"Period",
"(",
"x",
",",
"'Q'",
")",
")",
")",
"# Add metadata: enriched on timestamp",
"df_onion",
"[",
"'metadata__enriched_on'",
"]",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"df_onion",
"[",
"'data_source'",
"]",
"=",
"self",
".",
"data_source",
"df_onion",
"[",
"'grimoire_creation_date'",
"]",
"=",
"df_onion",
"[",
"ESOnionConnector",
".",
"TIMEFRAME",
"]",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Final new events: \"",
"+",
"str",
"(",
"len",
"(",
"df_onion",
")",
")",
")",
"return",
"self",
".",
"ProcessResults",
"(",
"processed",
"=",
"len",
"(",
"df_onion",
")",
",",
"out_items",
"=",
"df_onion",
")"
]
| Process a DataFrame to compute Onion.
:param items_block: items to be processed. Expects to find a pandas DataFrame. | [
"Process",
"a",
"DataFrame",
"to",
"compute",
"Onion",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L346-L368 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/projects.py | GrimoireLibProjects.get_projects | def get_projects(self):
""" Get the projects list from database """
repos_list = []
gerrit_projects_db = self.projects_db
db = Database(user="root", passwd="", host="localhost", port=3306,
scrdb=None, shdb=gerrit_projects_db, prjdb=None)
sql = """
SELECT DISTINCT(repository_name)
FROM project_repositories
WHERE data_source='scr'
"""
repos_list_raw = db.execute(sql)
# Convert from review.openstack.org_openstack/rpm-packaging-tools to
# openstack_rpm-packaging-tools
for repo in repos_list_raw:
# repo_name = repo[0].replace("review.openstack.org_","")
repo_name = repo[0].replace(self.repository + "_", "")
repos_list.append(repo_name)
return repos_list | python | def get_projects(self):
""" Get the projects list from database """
repos_list = []
gerrit_projects_db = self.projects_db
db = Database(user="root", passwd="", host="localhost", port=3306,
scrdb=None, shdb=gerrit_projects_db, prjdb=None)
sql = """
SELECT DISTINCT(repository_name)
FROM project_repositories
WHERE data_source='scr'
"""
repos_list_raw = db.execute(sql)
# Convert from review.openstack.org_openstack/rpm-packaging-tools to
# openstack_rpm-packaging-tools
for repo in repos_list_raw:
# repo_name = repo[0].replace("review.openstack.org_","")
repo_name = repo[0].replace(self.repository + "_", "")
repos_list.append(repo_name)
return repos_list | [
"def",
"get_projects",
"(",
"self",
")",
":",
"repos_list",
"=",
"[",
"]",
"gerrit_projects_db",
"=",
"self",
".",
"projects_db",
"db",
"=",
"Database",
"(",
"user",
"=",
"\"root\"",
",",
"passwd",
"=",
"\"\"",
",",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"3306",
",",
"scrdb",
"=",
"None",
",",
"shdb",
"=",
"gerrit_projects_db",
",",
"prjdb",
"=",
"None",
")",
"sql",
"=",
"\"\"\"\n SELECT DISTINCT(repository_name)\n FROM project_repositories\n WHERE data_source='scr'\n \"\"\"",
"repos_list_raw",
"=",
"db",
".",
"execute",
"(",
"sql",
")",
"# Convert from review.openstack.org_openstack/rpm-packaging-tools to",
"# openstack_rpm-packaging-tools",
"for",
"repo",
"in",
"repos_list_raw",
":",
"# repo_name = repo[0].replace(\"review.openstack.org_\",\"\")",
"repo_name",
"=",
"repo",
"[",
"0",
"]",
".",
"replace",
"(",
"self",
".",
"repository",
"+",
"\"_\"",
",",
"\"\"",
")",
"repos_list",
".",
"append",
"(",
"repo_name",
")",
"return",
"repos_list"
]
| Get the projects list from database | [
"Get",
"the",
"projects",
"list",
"from",
"database"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/projects.py#L37-L62 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | metadata | def metadata(func):
"""Add metadata to an item.
Decorator that adds metadata to a given item such as
the gelk revision used.
"""
@functools.wraps(func)
def decorator(self, *args, **kwargs):
eitem = func(self, *args, **kwargs)
metadata = {
'metadata__gelk_version': self.gelk_version,
'metadata__gelk_backend_name': self.__class__.__name__,
'metadata__enriched_on': datetime_utcnow().isoformat()
}
eitem.update(metadata)
return eitem
return decorator | python | def metadata(func):
"""Add metadata to an item.
Decorator that adds metadata to a given item such as
the gelk revision used.
"""
@functools.wraps(func)
def decorator(self, *args, **kwargs):
eitem = func(self, *args, **kwargs)
metadata = {
'metadata__gelk_version': self.gelk_version,
'metadata__gelk_backend_name': self.__class__.__name__,
'metadata__enriched_on': datetime_utcnow().isoformat()
}
eitem.update(metadata)
return eitem
return decorator | [
"def",
"metadata",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"eitem",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"metadata",
"=",
"{",
"'metadata__gelk_version'",
":",
"self",
".",
"gelk_version",
",",
"'metadata__gelk_backend_name'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"'metadata__enriched_on'",
":",
"datetime_utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"}",
"eitem",
".",
"update",
"(",
"metadata",
")",
"return",
"eitem",
"return",
"decorator"
]
| Add metadata to an item.
Decorator that adds metadata to a given item such as
the gelk revision used. | [
"Add",
"metadata",
"to",
"an",
"item",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L78-L95 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_grimoire_fields | def get_grimoire_fields(self, creation_date, item_name):
""" Return common grimoire fields for all data sources """
grimoire_date = None
try:
grimoire_date = str_to_datetime(creation_date).isoformat()
except Exception as ex:
pass
name = "is_" + self.get_connector_name() + "_" + item_name
return {
"grimoire_creation_date": grimoire_date,
name: 1
} | python | def get_grimoire_fields(self, creation_date, item_name):
""" Return common grimoire fields for all data sources """
grimoire_date = None
try:
grimoire_date = str_to_datetime(creation_date).isoformat()
except Exception as ex:
pass
name = "is_" + self.get_connector_name() + "_" + item_name
return {
"grimoire_creation_date": grimoire_date,
name: 1
} | [
"def",
"get_grimoire_fields",
"(",
"self",
",",
"creation_date",
",",
"item_name",
")",
":",
"grimoire_date",
"=",
"None",
"try",
":",
"grimoire_date",
"=",
"str_to_datetime",
"(",
"creation_date",
")",
".",
"isoformat",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"name",
"=",
"\"is_\"",
"+",
"self",
".",
"get_connector_name",
"(",
")",
"+",
"\"_\"",
"+",
"item_name",
"return",
"{",
"\"grimoire_creation_date\"",
":",
"grimoire_date",
",",
"name",
":",
"1",
"}"
]
| Return common grimoire fields for all data sources | [
"Return",
"common",
"grimoire",
"fields",
"for",
"all",
"data",
"sources"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L489-L503 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.add_project_levels | def add_project_levels(cls, project):
""" Add project sub levels extra items """
eitem_path = ''
eitem_project_levels = {}
if project is not None:
subprojects = project.split('.')
for i in range(0, len(subprojects)):
if i > 0:
eitem_path += "."
eitem_path += subprojects[i]
eitem_project_levels['project_' + str(i + 1)] = eitem_path
return eitem_project_levels | python | def add_project_levels(cls, project):
""" Add project sub levels extra items """
eitem_path = ''
eitem_project_levels = {}
if project is not None:
subprojects = project.split('.')
for i in range(0, len(subprojects)):
if i > 0:
eitem_path += "."
eitem_path += subprojects[i]
eitem_project_levels['project_' + str(i + 1)] = eitem_path
return eitem_project_levels | [
"def",
"add_project_levels",
"(",
"cls",
",",
"project",
")",
":",
"eitem_path",
"=",
"''",
"eitem_project_levels",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"subprojects",
"=",
"project",
".",
"split",
"(",
"'.'",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"subprojects",
")",
")",
":",
"if",
"i",
">",
"0",
":",
"eitem_path",
"+=",
"\".\"",
"eitem_path",
"+=",
"subprojects",
"[",
"i",
"]",
"eitem_project_levels",
"[",
"'project_'",
"+",
"str",
"(",
"i",
"+",
"1",
")",
"]",
"=",
"eitem_path",
"return",
"eitem_project_levels"
]
| Add project sub levels extra items | [
"Add",
"project",
"sub",
"levels",
"extra",
"items"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L515-L529 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_item_metadata | def get_item_metadata(self, eitem):
"""
In the projects.json file, inside each project, there is a field called "meta" which has a
dictionary with fields to be added to the enriched items for this project.
These fields must be added with the prefix cm_ (custom metadata).
This method fetches the metadata fields for the project in which the eitem is included.
:param eitem: enriched item to search metadata for
:return: a dictionary with the metadata fields
"""
eitem_metadata = {}
# Get the project entry for the item, which includes the metadata
project = self.find_item_project(eitem)
if project and 'meta' in self.json_projects[project]:
meta_fields = self.json_projects[project]['meta']
if isinstance(meta_fields, dict):
eitem_metadata = {CUSTOM_META_PREFIX + "_" + field: value for field, value in meta_fields.items()}
return eitem_metadata | python | def get_item_metadata(self, eitem):
"""
In the projects.json file, inside each project, there is a field called "meta" which has a
dictionary with fields to be added to the enriched items for this project.
These fields must be added with the prefix cm_ (custom metadata).
This method fetches the metadata fields for the project in which the eitem is included.
:param eitem: enriched item to search metadata for
:return: a dictionary with the metadata fields
"""
eitem_metadata = {}
# Get the project entry for the item, which includes the metadata
project = self.find_item_project(eitem)
if project and 'meta' in self.json_projects[project]:
meta_fields = self.json_projects[project]['meta']
if isinstance(meta_fields, dict):
eitem_metadata = {CUSTOM_META_PREFIX + "_" + field: value for field, value in meta_fields.items()}
return eitem_metadata | [
"def",
"get_item_metadata",
"(",
"self",
",",
"eitem",
")",
":",
"eitem_metadata",
"=",
"{",
"}",
"# Get the project entry for the item, which includes the metadata",
"project",
"=",
"self",
".",
"find_item_project",
"(",
"eitem",
")",
"if",
"project",
"and",
"'meta'",
"in",
"self",
".",
"json_projects",
"[",
"project",
"]",
":",
"meta_fields",
"=",
"self",
".",
"json_projects",
"[",
"project",
"]",
"[",
"'meta'",
"]",
"if",
"isinstance",
"(",
"meta_fields",
",",
"dict",
")",
":",
"eitem_metadata",
"=",
"{",
"CUSTOM_META_PREFIX",
"+",
"\"_\"",
"+",
"field",
":",
"value",
"for",
"field",
",",
"value",
"in",
"meta_fields",
".",
"items",
"(",
")",
"}",
"return",
"eitem_metadata"
]
| In the projects.json file, inside each project, there is a field called "meta" which has a
dictionary with fields to be added to the enriched items for this project.
These fields must be added with the prefix cm_ (custom metadata).
This method fetches the metadata fields for the project in which the eitem is included.
:param eitem: enriched item to search metadata for
:return: a dictionary with the metadata fields | [
"In",
"the",
"projects",
".",
"json",
"file",
"inside",
"each",
"project",
"there",
"is",
"a",
"field",
"called",
"meta",
"which",
"has",
"a",
"dictionary",
"with",
"fields",
"to",
"be",
"added",
"to",
"the",
"enriched",
"items",
"for",
"this",
"project",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L607-L630 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_domain | def get_domain(self, identity):
""" Get the domain from a SH identity """
domain = None
if identity['email']:
try:
domain = identity['email'].split("@")[1]
except IndexError:
# logger.warning("Bad email format: %s" % (identity['email']))
pass
return domain | python | def get_domain(self, identity):
""" Get the domain from a SH identity """
domain = None
if identity['email']:
try:
domain = identity['email'].split("@")[1]
except IndexError:
# logger.warning("Bad email format: %s" % (identity['email']))
pass
return domain | [
"def",
"get_domain",
"(",
"self",
",",
"identity",
")",
":",
"domain",
"=",
"None",
"if",
"identity",
"[",
"'email'",
"]",
":",
"try",
":",
"domain",
"=",
"identity",
"[",
"'email'",
"]",
".",
"split",
"(",
"\"@\"",
")",
"[",
"1",
"]",
"except",
"IndexError",
":",
"# logger.warning(\"Bad email format: %s\" % (identity['email']))",
"pass",
"return",
"domain"
]
| Get the domain from a SH identity | [
"Get",
"the",
"domain",
"from",
"a",
"SH",
"identity"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L640-L649 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_enrollment | def get_enrollment(self, uuid, item_date):
""" Get the enrollment for the uuid when the item was done """
# item_date must be offset-naive (utc)
if item_date and item_date.tzinfo:
item_date = (item_date - item_date.utcoffset()).replace(tzinfo=None)
enrollments = self.get_enrollments(uuid)
enroll = self.unaffiliated_group
if enrollments:
for enrollment in enrollments:
if not item_date:
enroll = enrollment.organization.name
break
elif item_date >= enrollment.start and item_date <= enrollment.end:
enroll = enrollment.organization.name
break
return enroll | python | def get_enrollment(self, uuid, item_date):
""" Get the enrollment for the uuid when the item was done """
# item_date must be offset-naive (utc)
if item_date and item_date.tzinfo:
item_date = (item_date - item_date.utcoffset()).replace(tzinfo=None)
enrollments = self.get_enrollments(uuid)
enroll = self.unaffiliated_group
if enrollments:
for enrollment in enrollments:
if not item_date:
enroll = enrollment.organization.name
break
elif item_date >= enrollment.start and item_date <= enrollment.end:
enroll = enrollment.organization.name
break
return enroll | [
"def",
"get_enrollment",
"(",
"self",
",",
"uuid",
",",
"item_date",
")",
":",
"# item_date must be offset-naive (utc)",
"if",
"item_date",
"and",
"item_date",
".",
"tzinfo",
":",
"item_date",
"=",
"(",
"item_date",
"-",
"item_date",
".",
"utcoffset",
"(",
")",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"enrollments",
"=",
"self",
".",
"get_enrollments",
"(",
"uuid",
")",
"enroll",
"=",
"self",
".",
"unaffiliated_group",
"if",
"enrollments",
":",
"for",
"enrollment",
"in",
"enrollments",
":",
"if",
"not",
"item_date",
":",
"enroll",
"=",
"enrollment",
".",
"organization",
".",
"name",
"break",
"elif",
"item_date",
">=",
"enrollment",
".",
"start",
"and",
"item_date",
"<=",
"enrollment",
".",
"end",
":",
"enroll",
"=",
"enrollment",
".",
"organization",
".",
"name",
"break",
"return",
"enroll"
]
| Get the enrollment for the uuid when the item was done | [
"Get",
"the",
"enrollment",
"for",
"the",
"uuid",
"when",
"the",
"item",
"was",
"done"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L658-L674 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.__get_item_sh_fields_empty | def __get_item_sh_fields_empty(self, rol, undefined=False):
""" Return a SH identity with all fields to empty_field """
# If empty_field is None, the fields do not appear in index patterns
empty_field = '' if not undefined else '-- UNDEFINED --'
return {
rol + "_id": empty_field,
rol + "_uuid": empty_field,
rol + "_name": empty_field,
rol + "_user_name": empty_field,
rol + "_domain": empty_field,
rol + "_gender": empty_field,
rol + "_gender_acc": None,
rol + "_org_name": empty_field,
rol + "_bot": False
} | python | def __get_item_sh_fields_empty(self, rol, undefined=False):
""" Return a SH identity with all fields to empty_field """
# If empty_field is None, the fields do not appear in index patterns
empty_field = '' if not undefined else '-- UNDEFINED --'
return {
rol + "_id": empty_field,
rol + "_uuid": empty_field,
rol + "_name": empty_field,
rol + "_user_name": empty_field,
rol + "_domain": empty_field,
rol + "_gender": empty_field,
rol + "_gender_acc": None,
rol + "_org_name": empty_field,
rol + "_bot": False
} | [
"def",
"__get_item_sh_fields_empty",
"(",
"self",
",",
"rol",
",",
"undefined",
"=",
"False",
")",
":",
"# If empty_field is None, the fields do not appear in index patterns",
"empty_field",
"=",
"''",
"if",
"not",
"undefined",
"else",
"'-- UNDEFINED --'",
"return",
"{",
"rol",
"+",
"\"_id\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_uuid\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_name\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_user_name\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_domain\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_gender\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_gender_acc\"",
":",
"None",
",",
"rol",
"+",
"\"_org_name\"",
":",
"empty_field",
",",
"rol",
"+",
"\"_bot\"",
":",
"False",
"}"
]
| Return a SH identity with all fields to empty_field | [
"Return",
"a",
"SH",
"identity",
"with",
"all",
"fields",
"to",
"empty_field"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L676-L690 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_item_sh_fields | def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None,
rol='author'):
""" Get standard SH fields from a SH identity """
eitem_sh = self.__get_item_sh_fields_empty(rol)
if identity:
# Use the identity to get the SortingHat identity
sh_ids = self.get_sh_ids(identity, self.get_connector_name())
eitem_sh[rol + "_id"] = sh_ids.get('id', '')
eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '')
eitem_sh[rol + "_name"] = identity.get('name', '')
eitem_sh[rol + "_user_name"] = identity.get('username', '')
eitem_sh[rol + "_domain"] = self.get_identity_domain(identity)
elif sh_id:
# Use the SortingHat id to get the identity
eitem_sh[rol + "_id"] = sh_id
eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id)
else:
# No data to get a SH identity. Return an empty one.
return eitem_sh
# If the identity does not exist, return an empty identity
if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]:
return self.__get_item_sh_fields_empty(rol, undefined=True)
# Get the SH profile and use its data first
profile = self.get_profile_sh(eitem_sh[rol + "_uuid"])
if profile:
# If name not in profile, keep its old value (should be empty or identity's name field value)
eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"])
email = profile.get('email', None)
if email:
eitem_sh[rol + "_domain"] = self.get_email_domain(email)
eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender)
eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0)
elif not profile and sh_id:
logger.warning("Can't find SH identity profile: %s", sh_id)
# Ensure we always write gender fields
if not eitem_sh.get(rol + "_gender"):
eitem_sh[rol + "_gender"] = self.unknown_gender
eitem_sh[rol + "_gender_acc"] = 0
eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"], item_date)
eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid'])
return eitem_sh | python | def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None,
rol='author'):
""" Get standard SH fields from a SH identity """
eitem_sh = self.__get_item_sh_fields_empty(rol)
if identity:
# Use the identity to get the SortingHat identity
sh_ids = self.get_sh_ids(identity, self.get_connector_name())
eitem_sh[rol + "_id"] = sh_ids.get('id', '')
eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '')
eitem_sh[rol + "_name"] = identity.get('name', '')
eitem_sh[rol + "_user_name"] = identity.get('username', '')
eitem_sh[rol + "_domain"] = self.get_identity_domain(identity)
elif sh_id:
# Use the SortingHat id to get the identity
eitem_sh[rol + "_id"] = sh_id
eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id)
else:
# No data to get a SH identity. Return an empty one.
return eitem_sh
# If the identity does not exist, return an empty identity
if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]:
return self.__get_item_sh_fields_empty(rol, undefined=True)
# Get the SH profile and use its data first
profile = self.get_profile_sh(eitem_sh[rol + "_uuid"])
if profile:
# If name not in profile, keep its old value (should be empty or identity's name field value)
eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"])
email = profile.get('email', None)
if email:
eitem_sh[rol + "_domain"] = self.get_email_domain(email)
eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender)
eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0)
elif not profile and sh_id:
logger.warning("Can't find SH identity profile: %s", sh_id)
# Ensure we always write gender fields
if not eitem_sh.get(rol + "_gender"):
eitem_sh[rol + "_gender"] = self.unknown_gender
eitem_sh[rol + "_gender_acc"] = 0
eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"], item_date)
eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid'])
return eitem_sh | [
"def",
"get_item_sh_fields",
"(",
"self",
",",
"identity",
"=",
"None",
",",
"item_date",
"=",
"None",
",",
"sh_id",
"=",
"None",
",",
"rol",
"=",
"'author'",
")",
":",
"eitem_sh",
"=",
"self",
".",
"__get_item_sh_fields_empty",
"(",
"rol",
")",
"if",
"identity",
":",
"# Use the identity to get the SortingHat identity",
"sh_ids",
"=",
"self",
".",
"get_sh_ids",
"(",
"identity",
",",
"self",
".",
"get_connector_name",
"(",
")",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_id\"",
"]",
"=",
"sh_ids",
".",
"get",
"(",
"'id'",
",",
"''",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_uuid\"",
"]",
"=",
"sh_ids",
".",
"get",
"(",
"'uuid'",
",",
"''",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_name\"",
"]",
"=",
"identity",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_user_name\"",
"]",
"=",
"identity",
".",
"get",
"(",
"'username'",
",",
"''",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_domain\"",
"]",
"=",
"self",
".",
"get_identity_domain",
"(",
"identity",
")",
"elif",
"sh_id",
":",
"# Use the SortingHat id to get the identity",
"eitem_sh",
"[",
"rol",
"+",
"\"_id\"",
"]",
"=",
"sh_id",
"eitem_sh",
"[",
"rol",
"+",
"\"_uuid\"",
"]",
"=",
"self",
".",
"get_uuid_from_id",
"(",
"sh_id",
")",
"else",
":",
"# No data to get a SH identity. Return an empty one.",
"return",
"eitem_sh",
"# If the identity does not exists return and empty identity",
"if",
"rol",
"+",
"\"_uuid\"",
"not",
"in",
"eitem_sh",
"or",
"not",
"eitem_sh",
"[",
"rol",
"+",
"\"_uuid\"",
"]",
":",
"return",
"self",
".",
"__get_item_sh_fields_empty",
"(",
"rol",
",",
"undefined",
"=",
"True",
")",
"# Get the SH profile to use first this data",
"profile",
"=",
"self",
".",
"get_profile_sh",
"(",
"eitem_sh",
"[",
"rol",
"+",
"\"_uuid\"",
"]",
")",
"if",
"profile",
":",
"# If name not in profile, keep its old value (should be empty or identity's name field value)",
"eitem_sh",
"[",
"rol",
"+",
"\"_name\"",
"]",
"=",
"profile",
".",
"get",
"(",
"'name'",
",",
"eitem_sh",
"[",
"rol",
"+",
"\"_name\"",
"]",
")",
"email",
"=",
"profile",
".",
"get",
"(",
"'email'",
",",
"None",
")",
"if",
"email",
":",
"eitem_sh",
"[",
"rol",
"+",
"\"_domain\"",
"]",
"=",
"self",
".",
"get_email_domain",
"(",
"email",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_gender\"",
"]",
"=",
"profile",
".",
"get",
"(",
"'gender'",
",",
"self",
".",
"unknown_gender",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_gender_acc\"",
"]",
"=",
"profile",
".",
"get",
"(",
"'gender_acc'",
",",
"0",
")",
"elif",
"not",
"profile",
"and",
"sh_id",
":",
"logger",
".",
"warning",
"(",
"\"Can't find SH identity profile: %s\"",
",",
"sh_id",
")",
"# Ensure we always write gender fields",
"if",
"not",
"eitem_sh",
".",
"get",
"(",
"rol",
"+",
"\"_gender\"",
")",
":",
"eitem_sh",
"[",
"rol",
"+",
"\"_gender\"",
"]",
"=",
"self",
".",
"unknown_gender",
"eitem_sh",
"[",
"rol",
"+",
"\"_gender_acc\"",
"]",
"=",
"0",
"eitem_sh",
"[",
"rol",
"+",
"\"_org_name\"",
"]",
"=",
"self",
".",
"get_enrollment",
"(",
"eitem_sh",
"[",
"rol",
"+",
"\"_uuid\"",
"]",
",",
"item_date",
")",
"eitem_sh",
"[",
"rol",
"+",
"\"_bot\"",
"]",
"=",
"self",
".",
"is_bot",
"(",
"eitem_sh",
"[",
"rol",
"+",
"'_uuid'",
"]",
")",
"return",
"eitem_sh"
]
| Get standard SH fields from a SH identity | [
"Get",
"standard",
"SH",
"fields",
"from",
"a",
"SH",
"identity"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L692-L741 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_item_sh | def get_item_sh(self, item, roles=None, date_field=None):
"""
Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields.
"""
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh | python | def get_item_sh(self, item, roles=None, date_field=None):
"""
Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields.
"""
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh | [
"def",
"get_item_sh",
"(",
"self",
",",
"item",
",",
"roles",
"=",
"None",
",",
"date_field",
"=",
"None",
")",
":",
"eitem_sh",
"=",
"{",
"}",
"# Item enriched",
"author_field",
"=",
"self",
".",
"get_field_author",
"(",
")",
"if",
"not",
"roles",
":",
"roles",
"=",
"[",
"author_field",
"]",
"if",
"not",
"date_field",
":",
"item_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"self",
".",
"get_field_date",
"(",
")",
"]",
")",
"else",
":",
"item_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"date_field",
"]",
")",
"users_data",
"=",
"self",
".",
"get_users_data",
"(",
"item",
")",
"for",
"rol",
"in",
"roles",
":",
"if",
"rol",
"in",
"users_data",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
",",
"rol",
")",
"eitem_sh",
".",
"update",
"(",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"item_date",
",",
"rol",
"=",
"rol",
")",
")",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_org_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_org_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_user_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_user_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"# Add the author field common in all data sources",
"rol_author",
"=",
"'author'",
"if",
"author_field",
"in",
"users_data",
"and",
"author_field",
"!=",
"rol_author",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
",",
"author_field",
")",
"eitem_sh",
".",
"update",
"(",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"item_date",
",",
"rol",
"=",
"rol_author",
")",
")",
"if",
"not",
"eitem_sh",
"[",
"'author_org_name'",
"]",
":",
"eitem_sh",
"[",
"'author_org_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"'author_name'",
"]",
":",
"eitem_sh",
"[",
"'author_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"'author_user_name'",
"]",
":",
"eitem_sh",
"[",
"'author_user_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"return",
"eitem_sh"
]
| Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields. | [
"Add",
"sorting",
"hat",
"enrichment",
"fields",
"for",
"different",
"roles"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L801-L852 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/enrich.py | Enrich.get_sh_ids | def get_sh_ids(self, identity, backend_name):
""" Return the Sorting Hat id and uuid for an identity """
# Convert the dict to tuple so it is hashable
identity_tuple = tuple(identity.items())
sh_ids = self.__get_sh_ids_cache(identity_tuple, backend_name)
return sh_ids | python | def get_sh_ids(self, identity, backend_name):
""" Return the Sorting Hat id and uuid for an identity """
# Convert the dict to tuple so it is hashable
identity_tuple = tuple(identity.items())
sh_ids = self.__get_sh_ids_cache(identity_tuple, backend_name)
return sh_ids | [
"def",
"get_sh_ids",
"(",
"self",
",",
"identity",
",",
"backend_name",
")",
":",
"# Convert the dict to tuple so it is hashable",
"identity_tuple",
"=",
"tuple",
"(",
"identity",
".",
"items",
"(",
")",
")",
"sh_ids",
"=",
"self",
".",
"__get_sh_ids_cache",
"(",
"identity_tuple",
",",
"backend_name",
")",
"return",
"sh_ids"
]
| Return the Sorting Hat id and uuid for an identity | [
"Return",
"the",
"Sorting",
"Hat",
"id",
"and",
"uuid",
"for",
"an",
"identity"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L867-L872 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic_items.py | ElasticItems.get_repository_filter_raw | def get_repository_filter_raw(self, term=False):
""" Returns the filter to be used in queries in a repository items """
perceval_backend_name = self.get_connector_name()
filter_ = get_repository_filter(self.perceval_backend, perceval_backend_name, term)
return filter_ | python | def get_repository_filter_raw(self, term=False):
""" Returns the filter to be used in queries in a repository items """
perceval_backend_name = self.get_connector_name()
filter_ = get_repository_filter(self.perceval_backend, perceval_backend_name, term)
return filter_ | [
"def",
"get_repository_filter_raw",
"(",
"self",
",",
"term",
"=",
"False",
")",
":",
"perceval_backend_name",
"=",
"self",
".",
"get_connector_name",
"(",
")",
"filter_",
"=",
"get_repository_filter",
"(",
"self",
".",
"perceval_backend",
",",
"perceval_backend_name",
",",
"term",
")",
"return",
"filter_"
]
| Returns the filter to be used in queries on repository items | [
"Returns",
"the",
"filter",
"to",
"be",
"used",
"in",
"queries",
"in",
"a",
"repository",
"items"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic_items.py#L67-L71 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic_items.py | ElasticItems.set_filter_raw | def set_filter_raw(self, filter_raw):
"""Filter to be used when getting items from Ocean index"""
self.filter_raw = filter_raw
self.filter_raw_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_dict.append(fltr) | python | def set_filter_raw(self, filter_raw):
"""Filter to be used when getting items from Ocean index"""
self.filter_raw = filter_raw
self.filter_raw_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_dict.append(fltr) | [
"def",
"set_filter_raw",
"(",
"self",
",",
"filter_raw",
")",
":",
"self",
".",
"filter_raw",
"=",
"filter_raw",
"self",
".",
"filter_raw_dict",
"=",
"[",
"]",
"splitted",
"=",
"re",
".",
"compile",
"(",
"FILTER_SEPARATOR",
")",
".",
"split",
"(",
"filter_raw",
")",
"for",
"fltr_raw",
"in",
"splitted",
":",
"fltr",
"=",
"self",
".",
"__process_filter",
"(",
"fltr_raw",
")",
"self",
".",
"filter_raw_dict",
".",
"append",
"(",
"fltr",
")"
]
| Filter to be used when getting items from Ocean index | [
"Filter",
"to",
"be",
"used",
"when",
"getting",
"items",
"from",
"Ocean",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic_items.py#L104-L114 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic_items.py | ElasticItems.set_filter_raw_should | def set_filter_raw_should(self, filter_raw_should):
"""Bool filter should to be used when getting items from Ocean index"""
self.filter_raw_should = filter_raw_should
self.filter_raw_should_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw_should)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_should_dict.append(fltr) | python | def set_filter_raw_should(self, filter_raw_should):
"""Bool filter should to be used when getting items from Ocean index"""
self.filter_raw_should = filter_raw_should
self.filter_raw_should_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw_should)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_should_dict.append(fltr) | [
"def",
"set_filter_raw_should",
"(",
"self",
",",
"filter_raw_should",
")",
":",
"self",
".",
"filter_raw_should",
"=",
"filter_raw_should",
"self",
".",
"filter_raw_should_dict",
"=",
"[",
"]",
"splitted",
"=",
"re",
".",
"compile",
"(",
"FILTER_SEPARATOR",
")",
".",
"split",
"(",
"filter_raw_should",
")",
"for",
"fltr_raw",
"in",
"splitted",
":",
"fltr",
"=",
"self",
".",
"__process_filter",
"(",
"fltr_raw",
")",
"self",
".",
"filter_raw_should_dict",
".",
"append",
"(",
"fltr",
")"
]
| Bool filter should to be used when getting items from Ocean index | [
"Bool",
"filter",
"should",
"to",
"be",
"used",
"when",
"getting",
"items",
"from",
"Ocean",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic_items.py#L116-L126 | train |
chaoss/grimoirelab-elk | grimoire_elk/elastic_items.py | ElasticItems.fetch | def fetch(self, _filter=None, ignore_incremental=False):
""" Fetch the items from raw or enriched index. An optional _filter
could be provided to filter the data collected """
logger.debug("Creating a elastic items generator.")
scroll_id = None
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
return []
scroll_id = page["_scroll_id"]
scroll_size = page['hits']['total']
if scroll_size == 0:
logger.warning("No results found from %s", self.elastic.anonymize_url(self.elastic.index_url))
return
while scroll_size > 0:
logger.debug("Fetching from %s: %d received", self.elastic.anonymize_url(self.elastic.index_url),
len(page['hits']['hits']))
for item in page['hits']['hits']:
eitem = item['_source']
yield eitem
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
break
scroll_size = len(page['hits']['hits'])
logger.debug("Fetching from %s: done receiving", self.elastic.anonymize_url(self.elastic.index_url)) | python | def fetch(self, _filter=None, ignore_incremental=False):
""" Fetch the items from raw or enriched index. An optional _filter
could be provided to filter the data collected """
logger.debug("Creating a elastic items generator.")
scroll_id = None
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
return []
scroll_id = page["_scroll_id"]
scroll_size = page['hits']['total']
if scroll_size == 0:
logger.warning("No results found from %s", self.elastic.anonymize_url(self.elastic.index_url))
return
while scroll_size > 0:
logger.debug("Fetching from %s: %d received", self.elastic.anonymize_url(self.elastic.index_url),
len(page['hits']['hits']))
for item in page['hits']['hits']:
eitem = item['_source']
yield eitem
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
break
scroll_size = len(page['hits']['hits'])
logger.debug("Fetching from %s: done receiving", self.elastic.anonymize_url(self.elastic.index_url)) | [
"def",
"fetch",
"(",
"self",
",",
"_filter",
"=",
"None",
",",
"ignore_incremental",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Creating a elastic items generator.\"",
")",
"scroll_id",
"=",
"None",
"page",
"=",
"self",
".",
"get_elastic_items",
"(",
"scroll_id",
",",
"_filter",
"=",
"_filter",
",",
"ignore_incremental",
"=",
"ignore_incremental",
")",
"if",
"not",
"page",
":",
"return",
"[",
"]",
"scroll_id",
"=",
"page",
"[",
"\"_scroll_id\"",
"]",
"scroll_size",
"=",
"page",
"[",
"'hits'",
"]",
"[",
"'total'",
"]",
"if",
"scroll_size",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"No results found from %s\"",
",",
"self",
".",
"elastic",
".",
"anonymize_url",
"(",
"self",
".",
"elastic",
".",
"index_url",
")",
")",
"return",
"while",
"scroll_size",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Fetching from %s: %d received\"",
",",
"self",
".",
"elastic",
".",
"anonymize_url",
"(",
"self",
".",
"elastic",
".",
"index_url",
")",
",",
"len",
"(",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
")",
")",
"for",
"item",
"in",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
":",
"eitem",
"=",
"item",
"[",
"'_source'",
"]",
"yield",
"eitem",
"page",
"=",
"self",
".",
"get_elastic_items",
"(",
"scroll_id",
",",
"_filter",
"=",
"_filter",
",",
"ignore_incremental",
"=",
"ignore_incremental",
")",
"if",
"not",
"page",
":",
"break",
"scroll_size",
"=",
"len",
"(",
"page",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Fetching from %s: done receiving\"",
",",
"self",
".",
"elastic",
".",
"anonymize_url",
"(",
"self",
".",
"elastic",
".",
"index_url",
")",
")"
]
| Fetch the items from raw or enriched index. An optional _filter
could be provided to filter the data collected | [
"Fetch",
"the",
"items",
"from",
"raw",
"or",
"enriched",
"index",
".",
"An",
"optional",
"_filter",
"could",
"be",
"provided",
"to",
"filter",
"the",
"data",
"collected"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elastic_items.py#L140-L174 | train |
chaoss/grimoirelab-elk | utils/index_mapping.py | find_uuid | def find_uuid(es_url, index):
""" Find the unique identifier field for a given index """
uid_field = None
# Get the first item to detect the data source and raw/enriched type
res = requests.get('%s/%s/_search?size=1' % (es_url, index))
first_item = res.json()['hits']['hits'][0]['_source']
fields = first_item.keys()
if 'uuid' in fields:
uid_field = 'uuid'
else:
# Non perceval backend
uuid_value = res.json()['hits']['hits'][0]['_id']
logging.debug("Finding unique id for %s with value %s", index, uuid_value)
for field in fields:
if first_item[field] == uuid_value:
logging.debug("Found unique id for %s: %s", index, field)
uid_field = field
break
if not uid_field:
logging.error("Can not find uid field for %s. Can not copy the index.", index)
logging.error("Try to copy it directly with elasticdump or similar.")
sys.exit(1)
return uid_field | python | def find_uuid(es_url, index):
""" Find the unique identifier field for a given index """
uid_field = None
# Get the first item to detect the data source and raw/enriched type
res = requests.get('%s/%s/_search?size=1' % (es_url, index))
first_item = res.json()['hits']['hits'][0]['_source']
fields = first_item.keys()
if 'uuid' in fields:
uid_field = 'uuid'
else:
# Non perceval backend
uuid_value = res.json()['hits']['hits'][0]['_id']
logging.debug("Finding unique id for %s with value %s", index, uuid_value)
for field in fields:
if first_item[field] == uuid_value:
logging.debug("Found unique id for %s: %s", index, field)
uid_field = field
break
if not uid_field:
logging.error("Can not find uid field for %s. Can not copy the index.", index)
logging.error("Try to copy it directly with elasticdump or similar.")
sys.exit(1)
return uid_field | [
"def",
"find_uuid",
"(",
"es_url",
",",
"index",
")",
":",
"uid_field",
"=",
"None",
"# Get the first item to detect the data source and raw/enriched type",
"res",
"=",
"requests",
".",
"get",
"(",
"'%s/%s/_search?size=1'",
"%",
"(",
"es_url",
",",
"index",
")",
")",
"first_item",
"=",
"res",
".",
"json",
"(",
")",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
"[",
"0",
"]",
"[",
"'_source'",
"]",
"fields",
"=",
"first_item",
".",
"keys",
"(",
")",
"if",
"'uuid'",
"in",
"fields",
":",
"uid_field",
"=",
"'uuid'",
"else",
":",
"# Non perceval backend",
"uuid_value",
"=",
"res",
".",
"json",
"(",
")",
"[",
"'hits'",
"]",
"[",
"'hits'",
"]",
"[",
"0",
"]",
"[",
"'_id'",
"]",
"logging",
".",
"debug",
"(",
"\"Finding unique id for %s with value %s\"",
",",
"index",
",",
"uuid_value",
")",
"for",
"field",
"in",
"fields",
":",
"if",
"first_item",
"[",
"field",
"]",
"==",
"uuid_value",
":",
"logging",
".",
"debug",
"(",
"\"Found unique id for %s: %s\"",
",",
"index",
",",
"field",
")",
"uid_field",
"=",
"field",
"break",
"if",
"not",
"uid_field",
":",
"logging",
".",
"error",
"(",
"\"Can not find uid field for %s. Can not copy the index.\"",
",",
"index",
")",
"logging",
".",
"error",
"(",
"\"Try to copy it directly with elasticdump or similar.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"uid_field"
]
| Find the unique identifier field for a given index | [
"Find",
"the",
"unique",
"identifier",
"field",
"for",
"a",
"given",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L62-L89 | train |
chaoss/grimoirelab-elk | utils/index_mapping.py | find_mapping | def find_mapping(es_url, index):
""" Find the mapping given an index """
mapping = None
backend = find_perceval_backend(es_url, index)
if backend:
mapping = backend.get_elastic_mappings()
if mapping:
logging.debug("MAPPING FOUND:\n%s", json.dumps(json.loads(mapping['items']), indent=True))
return mapping | python | def find_mapping(es_url, index):
""" Find the mapping given an index """
mapping = None
backend = find_perceval_backend(es_url, index)
if backend:
mapping = backend.get_elastic_mappings()
if mapping:
logging.debug("MAPPING FOUND:\n%s", json.dumps(json.loads(mapping['items']), indent=True))
return mapping | [
"def",
"find_mapping",
"(",
"es_url",
",",
"index",
")",
":",
"mapping",
"=",
"None",
"backend",
"=",
"find_perceval_backend",
"(",
"es_url",
",",
"index",
")",
"if",
"backend",
":",
"mapping",
"=",
"backend",
".",
"get_elastic_mappings",
"(",
")",
"if",
"mapping",
":",
"logging",
".",
"debug",
"(",
"\"MAPPING FOUND:\\n%s\"",
",",
"json",
".",
"dumps",
"(",
"json",
".",
"loads",
"(",
"mapping",
"[",
"'items'",
"]",
")",
",",
"indent",
"=",
"True",
")",
")",
"return",
"mapping"
]
| Find the mapping given an index | [
"Find",
"the",
"mapping",
"given",
"an",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L130-L142 | train |
chaoss/grimoirelab-elk | utils/index_mapping.py | get_elastic_items | def get_elastic_items(elastic, elastic_scroll_id=None, limit=None):
""" Get the items from the index """
scroll_size = limit
if not limit:
scroll_size = DEFAULT_LIMIT
if not elastic:
return None
url = elastic.index_url
max_process_items_pack_time = "5m" # 5 minutes
url += "/_search?scroll=%s&size=%i" % (max_process_items_pack_time,
scroll_size)
if elastic_scroll_id:
# Just continue with the scrolling
url = elastic.url
url += "/_search/scroll"
scroll_data = {
"scroll": max_process_items_pack_time,
"scroll_id": elastic_scroll_id
}
res = requests.post(url, data=json.dumps(scroll_data))
else:
query = """
{
"query": {
"bool": {
"must": []
}
}
}
"""
logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4))
res = requests.post(url, data=query)
rjson = None
try:
rjson = res.json()
except Exception:
logging.error("No JSON found in %s", res.text)
logging.error("No results found from %s", url)
return rjson | python | def get_elastic_items(elastic, elastic_scroll_id=None, limit=None):
""" Get the items from the index """
scroll_size = limit
if not limit:
scroll_size = DEFAULT_LIMIT
if not elastic:
return None
url = elastic.index_url
max_process_items_pack_time = "5m" # 5 minutes
url += "/_search?scroll=%s&size=%i" % (max_process_items_pack_time,
scroll_size)
if elastic_scroll_id:
# Just continue with the scrolling
url = elastic.url
url += "/_search/scroll"
scroll_data = {
"scroll": max_process_items_pack_time,
"scroll_id": elastic_scroll_id
}
res = requests.post(url, data=json.dumps(scroll_data))
else:
query = """
{
"query": {
"bool": {
"must": []
}
}
}
"""
logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4))
res = requests.post(url, data=query)
rjson = None
try:
rjson = res.json()
except Exception:
logging.error("No JSON found in %s", res.text)
logging.error("No results found from %s", url)
return rjson | [
"def",
"get_elastic_items",
"(",
"elastic",
",",
"elastic_scroll_id",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"scroll_size",
"=",
"limit",
"if",
"not",
"limit",
":",
"scroll_size",
"=",
"DEFAULT_LIMIT",
"if",
"not",
"elastic",
":",
"return",
"None",
"url",
"=",
"elastic",
".",
"index_url",
"max_process_items_pack_time",
"=",
"\"5m\"",
"# 10 minutes",
"url",
"+=",
"\"/_search?scroll=%s&size=%i\"",
"%",
"(",
"max_process_items_pack_time",
",",
"scroll_size",
")",
"if",
"elastic_scroll_id",
":",
"# Just continue with the scrolling",
"url",
"=",
"elastic",
".",
"url",
"url",
"+=",
"\"/_search/scroll\"",
"scroll_data",
"=",
"{",
"\"scroll\"",
":",
"max_process_items_pack_time",
",",
"\"scroll_id\"",
":",
"elastic_scroll_id",
"}",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"scroll_data",
")",
")",
"else",
":",
"query",
"=",
"\"\"\"\n {\n \"query\": {\n \"bool\": {\n \"must\": []\n }\n }\n }\n \"\"\"",
"logging",
".",
"debug",
"(",
"\"%s\\n%s\"",
",",
"url",
",",
"json",
".",
"dumps",
"(",
"json",
".",
"loads",
"(",
"query",
")",
",",
"indent",
"=",
"4",
")",
")",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"query",
")",
"rjson",
"=",
"None",
"try",
":",
"rjson",
"=",
"res",
".",
"json",
"(",
")",
"except",
"Exception",
":",
"logging",
".",
"error",
"(",
"\"No JSON found in %s\"",
",",
"res",
".",
"text",
")",
"logging",
".",
"error",
"(",
"\"No results found from %s\"",
",",
"url",
")",
"return",
"rjson"
]
| Get the items from the index | [
"Get",
"the",
"items",
"from",
"the",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L145-L190 | train |
chaoss/grimoirelab-elk | utils/index_mapping.py | fetch | def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
""" Fetch the items from raw or enriched index """
logging.debug("Creating a elastic items generator.")
elastic_scroll_id = None
search_after = search_after_value
while True:
if scroll:
rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
else:
rjson = get_elastic_items_search(elastic, search_after, limit)
if rjson and "_scroll_id" in rjson:
elastic_scroll_id = rjson["_scroll_id"]
if rjson and "hits" in rjson:
if not rjson["hits"]["hits"]:
break
for hit in rjson["hits"]["hits"]:
item = hit['_source']
if 'sort' in hit:
search_after = hit['sort']
try:
backend._fix_item(item)
except Exception:
pass
yield item
else:
logging.error("No results found from %s", elastic.index_url)
break
return | python | def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
""" Fetch the items from raw or enriched index """
logging.debug("Creating a elastic items generator.")
elastic_scroll_id = None
search_after = search_after_value
while True:
if scroll:
rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
else:
rjson = get_elastic_items_search(elastic, search_after, limit)
if rjson and "_scroll_id" in rjson:
elastic_scroll_id = rjson["_scroll_id"]
if rjson and "hits" in rjson:
if not rjson["hits"]["hits"]:
break
for hit in rjson["hits"]["hits"]:
item = hit['_source']
if 'sort' in hit:
search_after = hit['sort']
try:
backend._fix_item(item)
except Exception:
pass
yield item
else:
logging.error("No results found from %s", elastic.index_url)
break
return | [
"def",
"fetch",
"(",
"elastic",
",",
"backend",
",",
"limit",
"=",
"None",
",",
"search_after_value",
"=",
"None",
",",
"scroll",
"=",
"True",
")",
":",
"logging",
".",
"debug",
"(",
"\"Creating a elastic items generator.\"",
")",
"elastic_scroll_id",
"=",
"None",
"search_after",
"=",
"search_after_value",
"while",
"True",
":",
"if",
"scroll",
":",
"rjson",
"=",
"get_elastic_items",
"(",
"elastic",
",",
"elastic_scroll_id",
",",
"limit",
")",
"else",
":",
"rjson",
"=",
"get_elastic_items_search",
"(",
"elastic",
",",
"search_after",
",",
"limit",
")",
"if",
"rjson",
"and",
"\"_scroll_id\"",
"in",
"rjson",
":",
"elastic_scroll_id",
"=",
"rjson",
"[",
"\"_scroll_id\"",
"]",
"if",
"rjson",
"and",
"\"hits\"",
"in",
"rjson",
":",
"if",
"not",
"rjson",
"[",
"\"hits\"",
"]",
"[",
"\"hits\"",
"]",
":",
"break",
"for",
"hit",
"in",
"rjson",
"[",
"\"hits\"",
"]",
"[",
"\"hits\"",
"]",
":",
"item",
"=",
"hit",
"[",
"'_source'",
"]",
"if",
"'sort'",
"in",
"hit",
":",
"search_after",
"=",
"hit",
"[",
"'sort'",
"]",
"try",
":",
"backend",
".",
"_fix_item",
"(",
"item",
")",
"except",
"Exception",
":",
"pass",
"yield",
"item",
"else",
":",
"logging",
".",
"error",
"(",
"\"No results found from %s\"",
",",
"elastic",
".",
"index_url",
")",
"break",
"return"
]
| Fetch the items from raw or enriched index | [
"Fetch",
"the",
"items",
"from",
"raw",
"or",
"enriched",
"index"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L261-L294 | train |
chaoss/grimoirelab-elk | utils/index_mapping.py | export_items | def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
search_after=False, search_after_value=None, limit=None,
copy=False):
""" Export items from in_index to out_index using the correct mapping """
if not limit:
limit = DEFAULT_LIMIT
if search_after_value:
search_after_value_timestamp = int(search_after_value[0])
search_after_value_uuid = search_after_value[1]
search_after_value = [search_after_value_timestamp, search_after_value_uuid]
logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index)
count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
try:
count_res.raise_for_status()
except requests.exceptions.HTTPError:
if count_res.status_code == 404:
logging.error("The index does not exists: %s", in_index)
else:
logging.error(count_res.text)
sys.exit(1)
logging.info("Total items to copy: %i", count_res.json()['count'])
# Time to upload the items with the correct mapping
elastic_in = ElasticSearch(elastic_url, in_index)
if not copy:
# Create the correct mapping for the data sources detected from in_index
ds_mapping = find_mapping(elastic_url, in_index)
else:
logging.debug('Using the input index mapping')
ds_mapping = extract_mapping(elastic_url, in_index)
if not elastic_url_out:
elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping)
else:
elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping)
# Time to just copy from in_index to out_index
uid_field = find_uuid(elastic_url, in_index)
backend = find_perceval_backend(elastic_url, in_index)
if search_after:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
search_after_value, scroll=False), uid_field)
else:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field)
logging.info("Total items copied: %i", total) | python | def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
search_after=False, search_after_value=None, limit=None,
copy=False):
""" Export items from in_index to out_index using the correct mapping """
if not limit:
limit = DEFAULT_LIMIT
if search_after_value:
search_after_value_timestamp = int(search_after_value[0])
search_after_value_uuid = search_after_value[1]
search_after_value = [search_after_value_timestamp, search_after_value_uuid]
logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index)
count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
try:
count_res.raise_for_status()
except requests.exceptions.HTTPError:
if count_res.status_code == 404:
logging.error("The index does not exists: %s", in_index)
else:
logging.error(count_res.text)
sys.exit(1)
logging.info("Total items to copy: %i", count_res.json()['count'])
# Time to upload the items with the correct mapping
elastic_in = ElasticSearch(elastic_url, in_index)
if not copy:
# Create the correct mapping for the data sources detected from in_index
ds_mapping = find_mapping(elastic_url, in_index)
else:
logging.debug('Using the input index mapping')
ds_mapping = extract_mapping(elastic_url, in_index)
if not elastic_url_out:
elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping)
else:
elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping)
# Time to just copy from in_index to out_index
uid_field = find_uuid(elastic_url, in_index)
backend = find_perceval_backend(elastic_url, in_index)
if search_after:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
search_after_value, scroll=False), uid_field)
else:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field)
logging.info("Total items copied: %i", total) | [
"def",
"export_items",
"(",
"elastic_url",
",",
"in_index",
",",
"out_index",
",",
"elastic_url_out",
"=",
"None",
",",
"search_after",
"=",
"False",
",",
"search_after_value",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"if",
"not",
"limit",
":",
"limit",
"=",
"DEFAULT_LIMIT",
"if",
"search_after_value",
":",
"search_after_value_timestamp",
"=",
"int",
"(",
"search_after_value",
"[",
"0",
"]",
")",
"search_after_value_uuid",
"=",
"search_after_value",
"[",
"1",
"]",
"search_after_value",
"=",
"[",
"search_after_value_timestamp",
",",
"search_after_value_uuid",
"]",
"logging",
".",
"info",
"(",
"\"Exporting items from %s/%s to %s\"",
",",
"elastic_url",
",",
"in_index",
",",
"out_index",
")",
"count_res",
"=",
"requests",
".",
"get",
"(",
"'%s/%s/_count'",
"%",
"(",
"elastic_url",
",",
"in_index",
")",
")",
"try",
":",
"count_res",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
":",
"if",
"count_res",
".",
"status_code",
"==",
"404",
":",
"logging",
".",
"error",
"(",
"\"The index does not exists: %s\"",
",",
"in_index",
")",
"else",
":",
"logging",
".",
"error",
"(",
"count_res",
".",
"text",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"logging",
".",
"info",
"(",
"\"Total items to copy: %i\"",
",",
"count_res",
".",
"json",
"(",
")",
"[",
"'count'",
"]",
")",
"# Time to upload the items with the correct mapping",
"elastic_in",
"=",
"ElasticSearch",
"(",
"elastic_url",
",",
"in_index",
")",
"if",
"not",
"copy",
":",
"# Create the correct mapping for the data sources detected from in_index",
"ds_mapping",
"=",
"find_mapping",
"(",
"elastic_url",
",",
"in_index",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'Using the input index mapping'",
")",
"ds_mapping",
"=",
"extract_mapping",
"(",
"elastic_url",
",",
"in_index",
")",
"if",
"not",
"elastic_url_out",
":",
"elastic_out",
"=",
"ElasticSearch",
"(",
"elastic_url",
",",
"out_index",
",",
"mappings",
"=",
"ds_mapping",
")",
"else",
":",
"elastic_out",
"=",
"ElasticSearch",
"(",
"elastic_url_out",
",",
"out_index",
",",
"mappings",
"=",
"ds_mapping",
")",
"# Time to just copy from in_index to our_index",
"uid_field",
"=",
"find_uuid",
"(",
"elastic_url",
",",
"in_index",
")",
"backend",
"=",
"find_perceval_backend",
"(",
"elastic_url",
",",
"in_index",
")",
"if",
"search_after",
":",
"total",
"=",
"elastic_out",
".",
"bulk_upload",
"(",
"fetch",
"(",
"elastic_in",
",",
"backend",
",",
"limit",
",",
"search_after_value",
",",
"scroll",
"=",
"False",
")",
",",
"uid_field",
")",
"else",
":",
"total",
"=",
"elastic_out",
".",
"bulk_upload",
"(",
"fetch",
"(",
"elastic_in",
",",
"backend",
",",
"limit",
")",
",",
"uid_field",
")",
"logging",
".",
"info",
"(",
"\"Total items copied: %i\"",
",",
"total",
")"
]
| Export items from in_index to out_index using the correct mapping | [
"Export",
"items",
"from",
"in_index",
"to",
"out_index",
"using",
"the",
"correct",
"mapping"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L297-L347 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/gerrit.py | GerritEnrich._fix_review_dates | def _fix_review_dates(self, item):
"""Convert dates so ES detect them"""
for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
if date_field in item.keys():
date_ts = item[date_field]
item[date_field] = unixtime_to_datetime(date_ts).isoformat()
if 'patchSets' in item.keys():
for patch in item['patchSets']:
pdate_ts = patch['createdOn']
patch['createdOn'] = unixtime_to_datetime(pdate_ts).isoformat()
if 'approvals' in patch:
for approval in patch['approvals']:
adate_ts = approval['grantedOn']
approval['grantedOn'] = unixtime_to_datetime(adate_ts).isoformat()
if 'comments' in item.keys():
for comment in item['comments']:
cdate_ts = comment['timestamp']
comment['timestamp'] = unixtime_to_datetime(cdate_ts).isoformat() | python | def _fix_review_dates(self, item):
"""Convert dates so ES detect them"""
for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
if date_field in item.keys():
date_ts = item[date_field]
item[date_field] = unixtime_to_datetime(date_ts).isoformat()
if 'patchSets' in item.keys():
for patch in item['patchSets']:
pdate_ts = patch['createdOn']
patch['createdOn'] = unixtime_to_datetime(pdate_ts).isoformat()
if 'approvals' in patch:
for approval in patch['approvals']:
adate_ts = approval['grantedOn']
approval['grantedOn'] = unixtime_to_datetime(adate_ts).isoformat()
if 'comments' in item.keys():
for comment in item['comments']:
cdate_ts = comment['timestamp']
comment['timestamp'] = unixtime_to_datetime(cdate_ts).isoformat() | [
"def",
"_fix_review_dates",
"(",
"self",
",",
"item",
")",
":",
"for",
"date_field",
"in",
"[",
"'timestamp'",
",",
"'createdOn'",
",",
"'lastUpdated'",
"]",
":",
"if",
"date_field",
"in",
"item",
".",
"keys",
"(",
")",
":",
"date_ts",
"=",
"item",
"[",
"date_field",
"]",
"item",
"[",
"date_field",
"]",
"=",
"unixtime_to_datetime",
"(",
"date_ts",
")",
".",
"isoformat",
"(",
")",
"if",
"'patchSets'",
"in",
"item",
".",
"keys",
"(",
")",
":",
"for",
"patch",
"in",
"item",
"[",
"'patchSets'",
"]",
":",
"pdate_ts",
"=",
"patch",
"[",
"'createdOn'",
"]",
"patch",
"[",
"'createdOn'",
"]",
"=",
"unixtime_to_datetime",
"(",
"pdate_ts",
")",
".",
"isoformat",
"(",
")",
"if",
"'approvals'",
"in",
"patch",
":",
"for",
"approval",
"in",
"patch",
"[",
"'approvals'",
"]",
":",
"adate_ts",
"=",
"approval",
"[",
"'grantedOn'",
"]",
"approval",
"[",
"'grantedOn'",
"]",
"=",
"unixtime_to_datetime",
"(",
"adate_ts",
")",
".",
"isoformat",
"(",
")",
"if",
"'comments'",
"in",
"item",
".",
"keys",
"(",
")",
":",
"for",
"comment",
"in",
"item",
"[",
"'comments'",
"]",
":",
"cdate_ts",
"=",
"comment",
"[",
"'timestamp'",
"]",
"comment",
"[",
"'timestamp'",
"]",
"=",
"unixtime_to_datetime",
"(",
"cdate_ts",
")",
".",
"isoformat",
"(",
")"
]
| Convert dates so ES detects them | [
"Convert",
"dates",
"so",
"ES",
"detect",
"them"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/gerrit.py#L145-L166 | train |
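A minimal sketch of the conversion that _fix_review_dates applies, with the standard library standing in for unixtime_to_datetime (its import does not appear in this record) and a made-up item:

# Standard-library stand-in for unixtime_to_datetime: convert epoch seconds
# to an ISO 8601 string so Elasticsearch can detect the field as a date.
from datetime import datetime, timezone

item = {'timestamp': 1546300800, 'createdOn': 1546300800}  # hypothetical review data
for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
    if date_field in item:
        item[date_field] = datetime.fromtimestamp(item[date_field], tz=timezone.utc).isoformat()

print(item['timestamp'])  # 2019-01-01T00:00:00+00:00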
chaoss/grimoirelab-elk | grimoire_elk/enriched/bugzilla.py | BugzillaEnrich.get_sh_identity | def get_sh_identity(self, item, identity_field=None):
""" Return a Sorting Hat identity using bugzilla user data """
def fill_list_identity(identity, user_list_data):
""" Fill identity with user data in first item in list """
identity['username'] = user_list_data[0]['__text__']
if '@' in identity['username']:
identity['email'] = identity['username']
if 'name' in user_list_data[0]:
identity['name'] = user_list_data[0]['name']
return identity
identity = {}
for field in ['name', 'email', 'username']:
# Basic fields in Sorting Hat
identity[field] = None
user = item # by default a specific user dict is used
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
identity = fill_list_identity(identity, user)
return identity | python | def get_sh_identity(self, item, identity_field=None):
""" Return a Sorting Hat identity using bugzilla user data """
def fill_list_identity(identity, user_list_data):
""" Fill identity with user data in first item in list """
identity['username'] = user_list_data[0]['__text__']
if '@' in identity['username']:
identity['email'] = identity['username']
if 'name' in user_list_data[0]:
identity['name'] = user_list_data[0]['name']
return identity
identity = {}
for field in ['name', 'email', 'username']:
# Basic fields in Sorting Hat
identity[field] = None
user = item # by default a specific user dict is used
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
identity = fill_list_identity(identity, user)
return identity | [
"def",
"get_sh_identity",
"(",
"self",
",",
"item",
",",
"identity_field",
"=",
"None",
")",
":",
"def",
"fill_list_identity",
"(",
"identity",
",",
"user_list_data",
")",
":",
"\"\"\" Fill identity with user data in first item in list \"\"\"",
"identity",
"[",
"'username'",
"]",
"=",
"user_list_data",
"[",
"0",
"]",
"[",
"'__text__'",
"]",
"if",
"'@'",
"in",
"identity",
"[",
"'username'",
"]",
":",
"identity",
"[",
"'email'",
"]",
"=",
"identity",
"[",
"'username'",
"]",
"if",
"'name'",
"in",
"user_list_data",
"[",
"0",
"]",
":",
"identity",
"[",
"'name'",
"]",
"=",
"user_list_data",
"[",
"0",
"]",
"[",
"'name'",
"]",
"return",
"identity",
"identity",
"=",
"{",
"}",
"for",
"field",
"in",
"[",
"'name'",
",",
"'email'",
",",
"'username'",
"]",
":",
"# Basic fields in Sorting Hat",
"identity",
"[",
"field",
"]",
"=",
"None",
"user",
"=",
"item",
"# by default a specific user dict is used",
"if",
"'data'",
"in",
"item",
"and",
"type",
"(",
"item",
")",
"==",
"dict",
":",
"user",
"=",
"item",
"[",
"'data'",
"]",
"[",
"identity_field",
"]",
"identity",
"=",
"fill_list_identity",
"(",
"identity",
",",
"user",
")",
"return",
"identity"
]
| Return a Sorting Hat identity using bugzilla user data | [
"Return",
"a",
"Sorting",
"Hat",
"identity",
"using",
"bugzilla",
"user",
"data"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/bugzilla.py#L47-L70 | train |
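As a concrete illustration of the method above, a hypothetical Bugzilla user list resolves to the following identity (all values are made up, and enricher stands for a BugzillaEnrich instance):

# Hypothetical item: 'assigned_to' holds a list of user dicts, as the
# fill_list_identity helper expects.
item = {'data': {'assigned_to': [{'__text__': 'jsmith@example.com', 'name': 'John Smith'}]}}

identity = enricher.get_sh_identity(item, identity_field='assigned_to')
# identity == {'name': 'John Smith',
#              'email': 'jsmith@example.com',
#              'username': 'jsmith@example.com'}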
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | CeresBase.analyze | def analyze(self):
"""Populate an enriched index by processing input items in blocks.
:return: total number of out_items written.
"""
from_date = self._out.latest_date()
if from_date:
logger.info("Reading items since " + from_date)
else:
logger.info("Reading items since the beginning of times")
cont = 0
total_processed = 0
total_written = 0
for item_block in self._in.read_block(size=self._block_size, from_date=from_date):
cont = cont + len(item_block)
process_results = self.process(item_block)
total_processed += process_results.processed
if len(process_results.out_items) > 0:
self._out.write(process_results.out_items)
total_written += len(process_results.out_items)
else:
logger.info("No new items to be written this time.")
logger.info(
"Items read/to be written/total read/total processed/total written: "
"{0}/{1}/{2}/{3}/{4}".format(str(len(item_block)),
str(len(process_results.out_items)),
str(cont),
str(total_processed),
str(total_written)))
logger.info("SUMMARY: Items total read/total processed/total written: "
"{0}/{1}/{2}".format(str(cont),
str(total_processed),
str(total_written)))
logger.info("This is the end.")
return total_written | python | def analyze(self):
"""Populate an enriched index by processing input items in blocks.
:return: total number of out_items written.
"""
from_date = self._out.latest_date()
if from_date:
logger.info("Reading items since " + from_date)
else:
logger.info("Reading items since the beginning of times")
cont = 0
total_processed = 0
total_written = 0
for item_block in self._in.read_block(size=self._block_size, from_date=from_date):
cont = cont + len(item_block)
process_results = self.process(item_block)
total_processed += process_results.processed
if len(process_results.out_items) > 0:
self._out.write(process_results.out_items)
total_written += len(process_results.out_items)
else:
logger.info("No new items to be written this time.")
logger.info(
"Items read/to be written/total read/total processed/total written: "
"{0}/{1}/{2}/{3}/{4}".format(str(len(item_block)),
str(len(process_results.out_items)),
str(cont),
str(total_processed),
str(total_written)))
logger.info("SUMMARY: Items total read/total processed/total written: "
"{0}/{1}/{2}".format(str(cont),
str(total_processed),
str(total_written)))
logger.info("This is the end.")
return total_written | [
"def",
"analyze",
"(",
"self",
")",
":",
"from_date",
"=",
"self",
".",
"_out",
".",
"latest_date",
"(",
")",
"if",
"from_date",
":",
"logger",
".",
"info",
"(",
"\"Reading items since \"",
"+",
"from_date",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Reading items since the beginning of times\"",
")",
"cont",
"=",
"0",
"total_processed",
"=",
"0",
"total_written",
"=",
"0",
"for",
"item_block",
"in",
"self",
".",
"_in",
".",
"read_block",
"(",
"size",
"=",
"self",
".",
"_block_size",
",",
"from_date",
"=",
"from_date",
")",
":",
"cont",
"=",
"cont",
"+",
"len",
"(",
"item_block",
")",
"process_results",
"=",
"self",
".",
"process",
"(",
"item_block",
")",
"total_processed",
"+=",
"process_results",
".",
"processed",
"if",
"len",
"(",
"process_results",
".",
"out_items",
")",
">",
"0",
":",
"self",
".",
"_out",
".",
"write",
"(",
"process_results",
".",
"out_items",
")",
"total_written",
"+=",
"len",
"(",
"process_results",
".",
"out_items",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"No new items to be written this time.\"",
")",
"logger",
".",
"info",
"(",
"\"Items read/to be written/total read/total processed/total written: \"",
"\"{0}/{1}/{2}/{3}/{4}\"",
".",
"format",
"(",
"str",
"(",
"len",
"(",
"item_block",
")",
")",
",",
"str",
"(",
"len",
"(",
"process_results",
".",
"out_items",
")",
")",
",",
"str",
"(",
"cont",
")",
",",
"str",
"(",
"total_processed",
")",
",",
"str",
"(",
"total_written",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"SUMMARY: Items total read/total processed/total written: \"",
"\"{0}/{1}/{2}\"",
".",
"format",
"(",
"str",
"(",
"cont",
")",
",",
"str",
"(",
"total_processed",
")",
",",
"str",
"(",
"total_written",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"This is the end.\"",
")",
"return",
"total_written"
]
| Populate an enriched index by processing input items in blocks.
:return: total number of out_items written. | [
"Populate",
"an",
"enriched",
"index",
"by",
"processing",
"input",
"items",
"in",
"blocks",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L77-L119 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector.read_item | def read_item(self, from_date=None):
"""Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
yield hit | python | def read_item(self, from_date=None):
"""Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
yield hit | [
"def",
"read_item",
"(",
"self",
",",
"from_date",
"=",
"None",
")",
":",
"search_query",
"=",
"self",
".",
"_build_search_query",
"(",
"from_date",
")",
"for",
"hit",
"in",
"helpers",
".",
"scan",
"(",
"self",
".",
"_es_conn",
",",
"search_query",
",",
"scroll",
"=",
"'300m'",
",",
"index",
"=",
"self",
".",
"_es_index",
",",
"preserve_order",
"=",
"True",
")",
":",
"yield",
"hit"
]
| Read items and return them one by one.
:param from_date: start date for incremental reading.
:return: next single item when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch | [
"Read",
"items",
"and",
"return",
"them",
"one",
"by",
"one",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L161-L175 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector.read_block | def read_block(self, size, from_date=None):
"""Read items and return them in blocks.
:param from_date: start date for incremental reading.
:param size: block size.
:return: next block of items when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
hits_block = []
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
hits_block.append(hit)
if len(hits_block) % size == 0:
yield hits_block
# Reset hits block
hits_block = []
if len(hits_block) > 0:
yield hits_block | python | def read_block(self, size, from_date=None):
"""Read items and return them in blocks.
:param from_date: start date for incremental reading.
:param size: block size.
:return: next block of items when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
"""
search_query = self._build_search_query(from_date)
hits_block = []
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
hits_block.append(hit)
if len(hits_block) % size == 0:
yield hits_block
# Reset hits block
hits_block = []
if len(hits_block) > 0:
yield hits_block | [
"def",
"read_block",
"(",
"self",
",",
"size",
",",
"from_date",
"=",
"None",
")",
":",
"search_query",
"=",
"self",
".",
"_build_search_query",
"(",
"from_date",
")",
"hits_block",
"=",
"[",
"]",
"for",
"hit",
"in",
"helpers",
".",
"scan",
"(",
"self",
".",
"_es_conn",
",",
"search_query",
",",
"scroll",
"=",
"'300m'",
",",
"index",
"=",
"self",
".",
"_es_index",
",",
"preserve_order",
"=",
"True",
")",
":",
"hits_block",
".",
"append",
"(",
"hit",
")",
"if",
"len",
"(",
"hits_block",
")",
"%",
"size",
"==",
"0",
":",
"yield",
"hits_block",
"# Reset hits block",
"hits_block",
"=",
"[",
"]",
"if",
"len",
"(",
"hits_block",
")",
">",
"0",
":",
"yield",
"hits_block"
]
| Read items and return them in blocks.
:param from_date: start date for incremental reading.
:param size: block size.
:return: next block of items when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch | [
"Read",
"items",
"and",
"return",
"them",
"in",
"blocks",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L177-L203 | train |
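A usage sketch for read_block: iterate a raw index in fixed-size blocks starting from a date. The ESConnector constructor arguments are not shown in these records, so the ones below are assumptions.

# Assumed construction of ESConnector; only the read_block(size, from_date)
# call matches the code above.
from elasticsearch import Elasticsearch

es_conn = Elasticsearch(['http://localhost:9200'])
connector = ESConnector(es_conn, 'git_raw')  # hypothetical arguments
for block in connector.read_block(size=100, from_date='2019-01-01'):
    print("Read a block of %i hits" % len(block))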
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector.write | def write(self, items):
"""Upload items to ElasticSearch.
:param items: items to be uploaded.
"""
if self._read_only:
raise IOError("Cannot write, Connector created as Read Only")
# Uploading info to the new ES
docs = []
for item in items:
doc = {
"_index": self._es_index,
"_type": "item",
"_id": item["_id"],
"_source": item["_source"]
}
docs.append(doc)
# TODO exception and error handling
helpers.bulk(self._es_conn, docs)
logger.info(self.__log_prefix + " Written: " + str(len(docs))) | python | def write(self, items):
"""Upload items to ElasticSearch.
:param items: items to be uploaded.
"""
if self._read_only:
raise IOError("Cannot write, Connector created as Read Only")
# Uploading info to the new ES
docs = []
for item in items:
doc = {
"_index": self._es_index,
"_type": "item",
"_id": item["_id"],
"_source": item["_source"]
}
docs.append(doc)
# TODO exception and error handling
helpers.bulk(self._es_conn, docs)
logger.info(self.__log_prefix + " Written: " + str(len(docs))) | [
"def",
"write",
"(",
"self",
",",
"items",
")",
":",
"if",
"self",
".",
"_read_only",
":",
"raise",
"IOError",
"(",
"\"Cannot write, Connector created as Read Only\"",
")",
"# Uploading info to the new ES",
"docs",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"doc",
"=",
"{",
"\"_index\"",
":",
"self",
".",
"_es_index",
",",
"\"_type\"",
":",
"\"item\"",
",",
"\"_id\"",
":",
"item",
"[",
"\"_id\"",
"]",
",",
"\"_source\"",
":",
"item",
"[",
"\"_source\"",
"]",
"}",
"docs",
".",
"append",
"(",
"doc",
")",
"# TODO exception and error handling",
"helpers",
".",
"bulk",
"(",
"self",
".",
"_es_conn",
",",
"docs",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Written: \"",
"+",
"str",
"(",
"len",
"(",
"docs",
")",
")",
")"
]
| Upload items to ElasticSearch.
:param items: items to be uploaded. | [
"Upload",
"items",
"to",
"ElasticSearch",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L205-L225 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector.create_alias | def create_alias(self, alias_name):
"""Creates an alias pointing to the index configured in this connection"""
return self._es_conn.indices.put_alias(index=self._es_index, name=alias_name) | python | def create_alias(self, alias_name):
"""Creates an alias pointing to the index configured in this connection"""
return self._es_conn.indices.put_alias(index=self._es_index, name=alias_name) | [
"def",
"create_alias",
"(",
"self",
",",
"alias_name",
")",
":",
"return",
"self",
".",
"_es_conn",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"self",
".",
"_es_index",
",",
"name",
"=",
"alias_name",
")"
]
| Creates an alias pointing to the index configured in this connection | [
"Creates",
"an",
"alias",
"pointing",
"to",
"the",
"index",
"configured",
"in",
"this",
"connection"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L292-L295 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector.exists_alias | def exists_alias(self, alias_name, index_name=None):
"""Check whether or not the given alias exists
        :return: True if alias already exists"""
return self._es_conn.indices.exists_alias(index=index_name, name=alias_name) | python | def exists_alias(self, alias_name, index_name=None):
"""Check whether or not the given alias exists
        :return: True if alias already exists"""
return self._es_conn.indices.exists_alias(index=index_name, name=alias_name) | [
"def",
"exists_alias",
"(",
"self",
",",
"alias_name",
",",
"index_name",
"=",
"None",
")",
":",
"return",
"self",
".",
"_es_conn",
".",
"indices",
".",
"exists_alias",
"(",
"index",
"=",
"index_name",
",",
"name",
"=",
"alias_name",
")"
]
| Check whether or not the given alias exists
        :return: True if alias already exists | [
"Check",
"whether",
"or",
"not",
"the",
"given",
"alias",
"exists"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L297-L302 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/ceres_base.py | ESConnector._build_search_query | def _build_search_query(self, from_date):
"""Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format
"""
sort = [{self._sort_on_field: {"order": "asc"}}]
filters = []
if self._repo:
filters.append({"term": {"origin": self._repo}})
if from_date:
filters.append({"range": {self._sort_on_field: {"gte": from_date}}})
if filters:
query = {"bool": {"filter": filters}}
else:
query = {"match_all": {}}
search_query = {
"query": query,
"sort": sort
}
return search_query | python | def _build_search_query(self, from_date):
"""Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format
"""
sort = [{self._sort_on_field: {"order": "asc"}}]
filters = []
if self._repo:
filters.append({"term": {"origin": self._repo}})
if from_date:
filters.append({"range": {self._sort_on_field: {"gte": from_date}}})
if filters:
query = {"bool": {"filter": filters}}
else:
query = {"match_all": {}}
search_query = {
"query": query,
"sort": sort
}
return search_query | [
"def",
"_build_search_query",
"(",
"self",
",",
"from_date",
")",
":",
"sort",
"=",
"[",
"{",
"self",
".",
"_sort_on_field",
":",
"{",
"\"order\"",
":",
"\"asc\"",
"}",
"}",
"]",
"filters",
"=",
"[",
"]",
"if",
"self",
".",
"_repo",
":",
"filters",
".",
"append",
"(",
"{",
"\"term\"",
":",
"{",
"\"origin\"",
":",
"self",
".",
"_repo",
"}",
"}",
")",
"if",
"from_date",
":",
"filters",
".",
"append",
"(",
"{",
"\"range\"",
":",
"{",
"self",
".",
"_sort_on_field",
":",
"{",
"\"gte\"",
":",
"from_date",
"}",
"}",
"}",
")",
"if",
"filters",
":",
"query",
"=",
"{",
"\"bool\"",
":",
"{",
"\"filter\"",
":",
"filters",
"}",
"}",
"else",
":",
"query",
"=",
"{",
"\"match_all\"",
":",
"{",
"}",
"}",
"search_query",
"=",
"{",
"\"query\"",
":",
"query",
",",
"\"sort\"",
":",
"sort",
"}",
"return",
"search_query"
]
| Build an ElasticSearch search query to retrieve items for read methods.
:param from_date: date to start retrieving items from.
:return: JSON query in dict format | [
"Build",
"an",
"ElasticSearch",
"search",
"query",
"to",
"retrieve",
"items",
"for",
"read",
"methods",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/ceres_base.py#L304-L330 | train |
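For reference, this is the dictionary _build_search_query returns when both a repository filter and a start date apply, assuming self._sort_on_field is metadata__timestamp (the field named in the read_item docstring); the origin value is made up.

# Query built by _build_search_query with self._repo and from_date set.
search_query = {
    "query": {
        "bool": {
            "filter": [
                {"term": {"origin": "https://github.com/chaoss/grimoirelab-elk.git"}},
                {"range": {"metadata__timestamp": {"gte": "2019-01-01"}}}
            ]
        }
    },
    "sort": [{"metadata__timestamp": {"order": "asc"}}]
}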
chaoss/grimoirelab-elk | grimoire_elk/raw/elastic.py | ElasticOcean.add_params | def add_params(cls, cmdline_parser):
""" Shared params in all backends """
parser = cmdline_parser
parser.add_argument("-e", "--elastic_url", default="http://127.0.0.1:9200",
help="Host with elastic search (default: http://127.0.0.1:9200)")
parser.add_argument("--elastic_url-enrich",
help="Host with elastic search and enriched indexes") | python | def add_params(cls, cmdline_parser):
""" Shared params in all backends """
parser = cmdline_parser
parser.add_argument("-e", "--elastic_url", default="http://127.0.0.1:9200",
help="Host with elastic search (default: http://127.0.0.1:9200)")
parser.add_argument("--elastic_url-enrich",
help="Host with elastic search and enriched indexes") | [
"def",
"add_params",
"(",
"cls",
",",
"cmdline_parser",
")",
":",
"parser",
"=",
"cmdline_parser",
"parser",
".",
"add_argument",
"(",
"\"-e\"",
",",
"\"--elastic_url\"",
",",
"default",
"=",
"\"http://127.0.0.1:9200\"",
",",
"help",
"=",
"\"Host with elastic search (default: http://127.0.0.1:9200)\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--elastic_url-enrich\"",
",",
"help",
"=",
"\"Host with elastic search and enriched indexes\"",
")"
]
| Shared params in all backends | [
"Shared",
"params",
"in",
"all",
"backends"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/elastic.py#L47-L55 | train |
chaoss/grimoirelab-elk | grimoire_elk/raw/elastic.py | ElasticOcean.get_p2o_params_from_url | def get_p2o_params_from_url(cls, url):
""" Get the p2o params given a URL for the data source """
# if the url doesn't contain a filter separator, return it
if PRJ_JSON_FILTER_SEPARATOR not in url:
return {"url": url}
# otherwise, add the url to the params
params = {'url': url.split(' ', 1)[0]}
# tokenize the filter and add them to the param dict
tokens = url.split(PRJ_JSON_FILTER_SEPARATOR)[1:]
if len(tokens) > 1:
cause = "Too many filters defined for %s, only the first one is considered" % url
logger.warning(cause)
token = tokens[0]
filter_tokens = token.split(PRJ_JSON_FILTER_OP_ASSIGNMENT)
if len(filter_tokens) != 2:
cause = "Too many tokens after splitting for %s in %s" % (token, url)
logger.error(cause)
raise ELKError(cause=cause)
fltr_name = filter_tokens[0].strip()
fltr_value = filter_tokens[1].strip()
params['filter-' + fltr_name] = fltr_value
return params | python | def get_p2o_params_from_url(cls, url):
""" Get the p2o params given a URL for the data source """
# if the url doesn't contain a filter separator, return it
if PRJ_JSON_FILTER_SEPARATOR not in url:
return {"url": url}
# otherwise, add the url to the params
params = {'url': url.split(' ', 1)[0]}
# tokenize the filter and add them to the param dict
tokens = url.split(PRJ_JSON_FILTER_SEPARATOR)[1:]
if len(tokens) > 1:
cause = "Too many filters defined for %s, only the first one is considered" % url
logger.warning(cause)
token = tokens[0]
filter_tokens = token.split(PRJ_JSON_FILTER_OP_ASSIGNMENT)
if len(filter_tokens) != 2:
cause = "Too many tokens after splitting for %s in %s" % (token, url)
logger.error(cause)
raise ELKError(cause=cause)
fltr_name = filter_tokens[0].strip()
fltr_value = filter_tokens[1].strip()
params['filter-' + fltr_name] = fltr_value
return params | [
"def",
"get_p2o_params_from_url",
"(",
"cls",
",",
"url",
")",
":",
"# if the url doesn't contain a filter separator, return it",
"if",
"PRJ_JSON_FILTER_SEPARATOR",
"not",
"in",
"url",
":",
"return",
"{",
"\"url\"",
":",
"url",
"}",
"# otherwise, add the url to the params",
"params",
"=",
"{",
"'url'",
":",
"url",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"}",
"# tokenize the filter and add them to the param dict",
"tokens",
"=",
"url",
".",
"split",
"(",
"PRJ_JSON_FILTER_SEPARATOR",
")",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"tokens",
")",
">",
"1",
":",
"cause",
"=",
"\"Too many filters defined for %s, only the first one is considered\"",
"%",
"url",
"logger",
".",
"warning",
"(",
"cause",
")",
"token",
"=",
"tokens",
"[",
"0",
"]",
"filter_tokens",
"=",
"token",
".",
"split",
"(",
"PRJ_JSON_FILTER_OP_ASSIGNMENT",
")",
"if",
"len",
"(",
"filter_tokens",
")",
"!=",
"2",
":",
"cause",
"=",
"\"Too many tokens after splitting for %s in %s\"",
"%",
"(",
"token",
",",
"url",
")",
"logger",
".",
"error",
"(",
"cause",
")",
"raise",
"ELKError",
"(",
"cause",
"=",
"cause",
")",
"fltr_name",
"=",
"filter_tokens",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"fltr_value",
"=",
"filter_tokens",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"params",
"[",
"'filter-'",
"+",
"fltr_name",
"]",
"=",
"fltr_value",
"return",
"params"
]
| Get the p2o params given a URL for the data source | [
"Get",
"the",
"p2o",
"params",
"given",
"a",
"URL",
"for",
"the",
"data",
"source"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/elastic.py#L98-L127 | train |
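A hedged example of the URL splitting above, assuming PRJ_JSON_FILTER_SEPARATOR is "--filter-" and PRJ_JSON_FILTER_OP_ASSIGNMENT is "=" (neither constant is defined in these records):

# Assuming the separator constants named above, a projects.json entry with a
# filter is split into the URL plus one 'filter-*' parameter.
url = "https://github.com/chaoss/grimoirelab-elk --filter-raw=data.project:Main"
params = ElasticOcean.get_p2o_params_from_url(url)
# params == {'url': 'https://github.com/chaoss/grimoirelab-elk',
#            'filter-raw': 'data.project:Main'}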
chaoss/grimoirelab-elk | grimoire_elk/raw/elastic.py | ElasticOcean.feed | def feed(self, from_date=None, from_offset=None, category=None,
latest_items=None, arthur_items=None, filter_classified=None):
""" Feed data in Elastic from Perceval or Arthur """
if self.fetch_archive:
items = self.perceval_backend.fetch_from_archive()
self.feed_items(items)
return
elif arthur_items:
items = arthur_items
self.feed_items(items)
return
if from_date and from_offset:
raise RuntimeError("Can't not feed using from_date and from_offset.")
# We need to filter by repository to support several repositories
# in the same raw index
filters_ = [get_repository_filter(self.perceval_backend,
self.get_connector_name())]
# Check if backend supports from_date
signature = inspect.signature(self.perceval_backend.fetch)
last_update = None
if 'from_date' in signature.parameters:
if from_date:
last_update = from_date
else:
self.last_update = self.get_last_update_from_es(filters_=filters_)
last_update = self.last_update
logger.info("Incremental from: %s", last_update)
offset = None
if 'offset' in signature.parameters:
if from_offset:
offset = from_offset
else:
offset = self.elastic.get_last_offset("offset", filters_=filters_)
if offset is not None:
logger.info("Incremental from: %i offset", offset)
else:
logger.info("Not incremental")
params = {}
# category and filter_classified params are shared
# by all Perceval backends
if category is not None:
params['category'] = category
if filter_classified is not None:
params['filter_classified'] = filter_classified
# latest items, from_date and offset cannot be used together,
# thus, the params dictionary is filled with the param available
# and Perceval is executed
if latest_items:
params['latest_items'] = latest_items
items = self.perceval_backend.fetch(**params)
elif last_update:
last_update = last_update.replace(tzinfo=None)
params['from_date'] = last_update
items = self.perceval_backend.fetch(**params)
elif offset is not None:
params['offset'] = offset
items = self.perceval_backend.fetch(**params)
else:
items = self.perceval_backend.fetch(**params)
self.feed_items(items)
self.update_items() | python | def feed(self, from_date=None, from_offset=None, category=None,
latest_items=None, arthur_items=None, filter_classified=None):
""" Feed data in Elastic from Perceval or Arthur """
if self.fetch_archive:
items = self.perceval_backend.fetch_from_archive()
self.feed_items(items)
return
elif arthur_items:
items = arthur_items
self.feed_items(items)
return
if from_date and from_offset:
raise RuntimeError("Can't not feed using from_date and from_offset.")
# We need to filter by repository to support several repositories
# in the same raw index
filters_ = [get_repository_filter(self.perceval_backend,
self.get_connector_name())]
# Check if backend supports from_date
signature = inspect.signature(self.perceval_backend.fetch)
last_update = None
if 'from_date' in signature.parameters:
if from_date:
last_update = from_date
else:
self.last_update = self.get_last_update_from_es(filters_=filters_)
last_update = self.last_update
logger.info("Incremental from: %s", last_update)
offset = None
if 'offset' in signature.parameters:
if from_offset:
offset = from_offset
else:
offset = self.elastic.get_last_offset("offset", filters_=filters_)
if offset is not None:
logger.info("Incremental from: %i offset", offset)
else:
logger.info("Not incremental")
params = {}
# category and filter_classified params are shared
# by all Perceval backends
if category is not None:
params['category'] = category
if filter_classified is not None:
params['filter_classified'] = filter_classified
# latest items, from_date and offset cannot be used together,
# thus, the params dictionary is filled with the param available
# and Perceval is executed
if latest_items:
params['latest_items'] = latest_items
items = self.perceval_backend.fetch(**params)
elif last_update:
last_update = last_update.replace(tzinfo=None)
params['from_date'] = last_update
items = self.perceval_backend.fetch(**params)
elif offset is not None:
params['offset'] = offset
items = self.perceval_backend.fetch(**params)
else:
items = self.perceval_backend.fetch(**params)
self.feed_items(items)
self.update_items() | [
"def",
"feed",
"(",
"self",
",",
"from_date",
"=",
"None",
",",
"from_offset",
"=",
"None",
",",
"category",
"=",
"None",
",",
"latest_items",
"=",
"None",
",",
"arthur_items",
"=",
"None",
",",
"filter_classified",
"=",
"None",
")",
":",
"if",
"self",
".",
"fetch_archive",
":",
"items",
"=",
"self",
".",
"perceval_backend",
".",
"fetch_from_archive",
"(",
")",
"self",
".",
"feed_items",
"(",
"items",
")",
"return",
"elif",
"arthur_items",
":",
"items",
"=",
"arthur_items",
"self",
".",
"feed_items",
"(",
"items",
")",
"return",
"if",
"from_date",
"and",
"from_offset",
":",
"raise",
"RuntimeError",
"(",
"\"Can't not feed using from_date and from_offset.\"",
")",
"# We need to filter by repository to support several repositories",
"# in the same raw index",
"filters_",
"=",
"[",
"get_repository_filter",
"(",
"self",
".",
"perceval_backend",
",",
"self",
".",
"get_connector_name",
"(",
")",
")",
"]",
"# Check if backend supports from_date",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"self",
".",
"perceval_backend",
".",
"fetch",
")",
"last_update",
"=",
"None",
"if",
"'from_date'",
"in",
"signature",
".",
"parameters",
":",
"if",
"from_date",
":",
"last_update",
"=",
"from_date",
"else",
":",
"self",
".",
"last_update",
"=",
"self",
".",
"get_last_update_from_es",
"(",
"filters_",
"=",
"filters_",
")",
"last_update",
"=",
"self",
".",
"last_update",
"logger",
".",
"info",
"(",
"\"Incremental from: %s\"",
",",
"last_update",
")",
"offset",
"=",
"None",
"if",
"'offset'",
"in",
"signature",
".",
"parameters",
":",
"if",
"from_offset",
":",
"offset",
"=",
"from_offset",
"else",
":",
"offset",
"=",
"self",
".",
"elastic",
".",
"get_last_offset",
"(",
"\"offset\"",
",",
"filters_",
"=",
"filters_",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"\"Incremental from: %i offset\"",
",",
"offset",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Not incremental\"",
")",
"params",
"=",
"{",
"}",
"# category and filter_classified params are shared",
"# by all Perceval backends",
"if",
"category",
"is",
"not",
"None",
":",
"params",
"[",
"'category'",
"]",
"=",
"category",
"if",
"filter_classified",
"is",
"not",
"None",
":",
"params",
"[",
"'filter_classified'",
"]",
"=",
"filter_classified",
"# latest items, from_date and offset cannot be used together,",
"# thus, the params dictionary is filled with the param available",
"# and Perceval is executed",
"if",
"latest_items",
":",
"params",
"[",
"'latest_items'",
"]",
"=",
"latest_items",
"items",
"=",
"self",
".",
"perceval_backend",
".",
"fetch",
"(",
"*",
"*",
"params",
")",
"elif",
"last_update",
":",
"last_update",
"=",
"last_update",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"params",
"[",
"'from_date'",
"]",
"=",
"last_update",
"items",
"=",
"self",
".",
"perceval_backend",
".",
"fetch",
"(",
"*",
"*",
"params",
")",
"elif",
"offset",
"is",
"not",
"None",
":",
"params",
"[",
"'offset'",
"]",
"=",
"offset",
"items",
"=",
"self",
".",
"perceval_backend",
".",
"fetch",
"(",
"*",
"*",
"params",
")",
"else",
":",
"items",
"=",
"self",
".",
"perceval_backend",
".",
"fetch",
"(",
"*",
"*",
"params",
")",
"self",
".",
"feed_items",
"(",
"items",
")",
"self",
".",
"update_items",
"(",
")"
]
| Feed data in Elastic from Perceval or Arthur | [
"Feed",
"data",
"in",
"Elastic",
"from",
"Perceval",
"or",
"Arthur"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/elastic.py#L155-L226 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | GitEnrich.get_identities | def get_identities(self, item):
""" Return the identities from an item.
If the repo is in GitHub, get the usernames from GitHub. """
def add_sh_github_identity(user, user_field, rol):
""" Add a new github identity to SH if it does not exists """
github_repo = None
if GITHUB in item['origin']:
github_repo = item['origin'].replace(GITHUB, '')
github_repo = re.sub('.git$', '', github_repo)
if not github_repo:
return
# Try to get the identity from SH
user_data = item['data'][user_field]
sh_identity = SortingHat.get_github_commit_username(self.sh_db, user, SH_GIT_COMMIT)
if not sh_identity:
                # Get the username from GitHub
gh_username = self.get_github_login(user_data, rol, commit_hash, github_repo)
# Create a new SH identity with name, email from git and username from github
logger.debug("Adding new identity %s to SH %s: %s", gh_username, SH_GIT_COMMIT, user)
user = self.get_sh_identity(user_data)
user['username'] = gh_username
SortingHat.add_identity(self.sh_db, user, SH_GIT_COMMIT)
else:
if user_data not in self.github_logins:
self.github_logins[user_data] = sh_identity['username']
logger.debug("GitHub-commit exists. username:%s user:%s",
sh_identity['username'], user_data)
commit_hash = item['data']['commit']
if item['data']['Author']:
# Check multi authors commits
m = self.AUTHOR_P2P_REGEX.match(item['data']["Author"])
n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"])
if (m or n) and self.pair_programming:
authors = self.__get_authors(item['data']["Author"])
for author in authors:
user = self.get_sh_identity(author)
yield user
else:
user = self.get_sh_identity(item['data']["Author"])
yield user
if self.github_token:
add_sh_github_identity(user, 'Author', 'author')
if item['data']['Commit']:
m = self.AUTHOR_P2P_REGEX.match(item['data']["Commit"])
n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"])
if (m or n) and self.pair_programming:
committers = self.__get_authors(item['data']['Commit'])
for committer in committers:
user = self.get_sh_identity(committer)
yield user
else:
user = self.get_sh_identity(item['data']['Commit'])
yield user
if self.github_token:
add_sh_github_identity(user, 'Commit', 'committer')
if 'Signed-off-by' in item['data'] and self.pair_programming:
signers = item['data']["Signed-off-by"]
for signer in signers:
user = self.get_sh_identity(signer)
yield user | python | def get_identities(self, item):
""" Return the identities from an item.
If the repo is in GitHub, get the usernames from GitHub. """
def add_sh_github_identity(user, user_field, rol):
""" Add a new github identity to SH if it does not exists """
github_repo = None
if GITHUB in item['origin']:
github_repo = item['origin'].replace(GITHUB, '')
github_repo = re.sub('.git$', '', github_repo)
if not github_repo:
return
# Try to get the identity from SH
user_data = item['data'][user_field]
sh_identity = SortingHat.get_github_commit_username(self.sh_db, user, SH_GIT_COMMIT)
if not sh_identity:
                # Get the username from GitHub
gh_username = self.get_github_login(user_data, rol, commit_hash, github_repo)
# Create a new SH identity with name, email from git and username from github
logger.debug("Adding new identity %s to SH %s: %s", gh_username, SH_GIT_COMMIT, user)
user = self.get_sh_identity(user_data)
user['username'] = gh_username
SortingHat.add_identity(self.sh_db, user, SH_GIT_COMMIT)
else:
if user_data not in self.github_logins:
self.github_logins[user_data] = sh_identity['username']
logger.debug("GitHub-commit exists. username:%s user:%s",
sh_identity['username'], user_data)
commit_hash = item['data']['commit']
if item['data']['Author']:
# Check multi authors commits
m = self.AUTHOR_P2P_REGEX.match(item['data']["Author"])
n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"])
if (m or n) and self.pair_programming:
authors = self.__get_authors(item['data']["Author"])
for author in authors:
user = self.get_sh_identity(author)
yield user
else:
user = self.get_sh_identity(item['data']["Author"])
yield user
if self.github_token:
add_sh_github_identity(user, 'Author', 'author')
if item['data']['Commit']:
m = self.AUTHOR_P2P_REGEX.match(item['data']["Commit"])
n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"])
if (m or n) and self.pair_programming:
committers = self.__get_authors(item['data']['Commit'])
for committer in committers:
user = self.get_sh_identity(committer)
yield user
else:
user = self.get_sh_identity(item['data']['Commit'])
yield user
if self.github_token:
add_sh_github_identity(user, 'Commit', 'committer')
if 'Signed-off-by' in item['data'] and self.pair_programming:
signers = item['data']["Signed-off-by"]
for signer in signers:
user = self.get_sh_identity(signer)
yield user | [
"def",
"get_identities",
"(",
"self",
",",
"item",
")",
":",
"def",
"add_sh_github_identity",
"(",
"user",
",",
"user_field",
",",
"rol",
")",
":",
"\"\"\" Add a new github identity to SH if it does not exists \"\"\"",
"github_repo",
"=",
"None",
"if",
"GITHUB",
"in",
"item",
"[",
"'origin'",
"]",
":",
"github_repo",
"=",
"item",
"[",
"'origin'",
"]",
".",
"replace",
"(",
"GITHUB",
",",
"''",
")",
"github_repo",
"=",
"re",
".",
"sub",
"(",
"'.git$'",
",",
"''",
",",
"github_repo",
")",
"if",
"not",
"github_repo",
":",
"return",
"# Try to get the identity from SH",
"user_data",
"=",
"item",
"[",
"'data'",
"]",
"[",
"user_field",
"]",
"sh_identity",
"=",
"SortingHat",
".",
"get_github_commit_username",
"(",
"self",
".",
"sh_db",
",",
"user",
",",
"SH_GIT_COMMIT",
")",
"if",
"not",
"sh_identity",
":",
"# Get the usename from GitHub",
"gh_username",
"=",
"self",
".",
"get_github_login",
"(",
"user_data",
",",
"rol",
",",
"commit_hash",
",",
"github_repo",
")",
"# Create a new SH identity with name, email from git and username from github",
"logger",
".",
"debug",
"(",
"\"Adding new identity %s to SH %s: %s\"",
",",
"gh_username",
",",
"SH_GIT_COMMIT",
",",
"user",
")",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"user_data",
")",
"user",
"[",
"'username'",
"]",
"=",
"gh_username",
"SortingHat",
".",
"add_identity",
"(",
"self",
".",
"sh_db",
",",
"user",
",",
"SH_GIT_COMMIT",
")",
"else",
":",
"if",
"user_data",
"not",
"in",
"self",
".",
"github_logins",
":",
"self",
".",
"github_logins",
"[",
"user_data",
"]",
"=",
"sh_identity",
"[",
"'username'",
"]",
"logger",
".",
"debug",
"(",
"\"GitHub-commit exists. username:%s user:%s\"",
",",
"sh_identity",
"[",
"'username'",
"]",
",",
"user_data",
")",
"commit_hash",
"=",
"item",
"[",
"'data'",
"]",
"[",
"'commit'",
"]",
"if",
"item",
"[",
"'data'",
"]",
"[",
"'Author'",
"]",
":",
"# Check multi authors commits",
"m",
"=",
"self",
".",
"AUTHOR_P2P_REGEX",
".",
"match",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Author\"",
"]",
")",
"n",
"=",
"self",
".",
"AUTHOR_P2P_NEW_REGEX",
".",
"match",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Author\"",
"]",
")",
"if",
"(",
"m",
"or",
"n",
")",
"and",
"self",
".",
"pair_programming",
":",
"authors",
"=",
"self",
".",
"__get_authors",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Author\"",
"]",
")",
"for",
"author",
"in",
"authors",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"author",
")",
"yield",
"user",
"else",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Author\"",
"]",
")",
"yield",
"user",
"if",
"self",
".",
"github_token",
":",
"add_sh_github_identity",
"(",
"user",
",",
"'Author'",
",",
"'author'",
")",
"if",
"item",
"[",
"'data'",
"]",
"[",
"'Commit'",
"]",
":",
"m",
"=",
"self",
".",
"AUTHOR_P2P_REGEX",
".",
"match",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Commit\"",
"]",
")",
"n",
"=",
"self",
".",
"AUTHOR_P2P_NEW_REGEX",
".",
"match",
"(",
"item",
"[",
"'data'",
"]",
"[",
"\"Author\"",
"]",
")",
"if",
"(",
"m",
"or",
"n",
")",
"and",
"self",
".",
"pair_programming",
":",
"committers",
"=",
"self",
".",
"__get_authors",
"(",
"item",
"[",
"'data'",
"]",
"[",
"'Commit'",
"]",
")",
"for",
"committer",
"in",
"committers",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"committer",
")",
"yield",
"user",
"else",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
"[",
"'data'",
"]",
"[",
"'Commit'",
"]",
")",
"yield",
"user",
"if",
"self",
".",
"github_token",
":",
"add_sh_github_identity",
"(",
"user",
",",
"'Commit'",
",",
"'committer'",
")",
"if",
"'Signed-off-by'",
"in",
"item",
"[",
"'data'",
"]",
"and",
"self",
".",
"pair_programming",
":",
"signers",
"=",
"item",
"[",
"'data'",
"]",
"[",
"\"Signed-off-by\"",
"]",
"for",
"signer",
"in",
"signers",
":",
"user",
"=",
"self",
".",
"get_sh_identity",
"(",
"signer",
")",
"yield",
"user"
]
| Return the identities from an item.
If the repo is in GitHub, get the usernames from GitHub. | [
"Return",
"the",
"identities",
"from",
"an",
"item",
".",
"If",
"the",
"repo",
"is",
"in",
"GitHub",
"get",
"the",
"usernames",
"from",
"GitHub",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L148-L211 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | GitEnrich.__fix_field_date | def __fix_field_date(self, item, attribute):
"""Fix possible errors in the field date"""
field_date = str_to_datetime(item[attribute])
try:
_ = int(field_date.strftime("%z")[0:3])
except ValueError:
logger.warning("%s in commit %s has a wrong format", attribute, item['commit'])
item[attribute] = field_date.replace(tzinfo=None).isoformat() | python | def __fix_field_date(self, item, attribute):
"""Fix possible errors in the field date"""
field_date = str_to_datetime(item[attribute])
try:
_ = int(field_date.strftime("%z")[0:3])
except ValueError:
logger.warning("%s in commit %s has a wrong format", attribute, item['commit'])
item[attribute] = field_date.replace(tzinfo=None).isoformat() | [
"def",
"__fix_field_date",
"(",
"self",
",",
"item",
",",
"attribute",
")",
":",
"field_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"attribute",
"]",
")",
"try",
":",
"_",
"=",
"int",
"(",
"field_date",
".",
"strftime",
"(",
"\"%z\"",
")",
"[",
"0",
":",
"3",
"]",
")",
"except",
"ValueError",
":",
"logger",
".",
"warning",
"(",
"\"%s in commit %s has a wrong format\"",
",",
"attribute",
",",
"item",
"[",
"'commit'",
"]",
")",
"item",
"[",
"attribute",
"]",
"=",
"field_date",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
".",
"isoformat",
"(",
")"
]
| Fix possible errors in the field date | [
"Fix",
"possible",
"errors",
"in",
"the",
"field",
"date"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L425-L434 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | GitEnrich.update_items | def update_items(self, ocean_backend, enrich_backend):
"""Retrieve the commits not present in the original repository and delete
the corresponding documents from the raw and enriched indexes"""
fltr = {
'name': 'origin',
'value': [self.perceval_backend.origin]
}
logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin)
git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath)
try:
current_hashes = set([commit for commit in git_repo.rev_list()])
except Exception as e:
logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e)
return
raw_hashes = set([item['data']['commit']
for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)])
hashes_to_delete = list(raw_hashes.difference(current_hashes))
to_process = []
for _hash in hashes_to_delete:
to_process.append(_hash)
if len(to_process) != MAX_BULK_UPDATE_SIZE:
continue
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
to_process = []
if to_process:
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url),
self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url),
self.perceval_backend.origin)
# update branch info
self.delete_commit_branches(enrich_backend)
self.add_commit_branches(git_repo, enrich_backend) | python | def update_items(self, ocean_backend, enrich_backend):
"""Retrieve the commits not present in the original repository and delete
the corresponding documents from the raw and enriched indexes"""
fltr = {
'name': 'origin',
'value': [self.perceval_backend.origin]
}
logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin)
git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath)
try:
current_hashes = set([commit for commit in git_repo.rev_list()])
except Exception as e:
logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e)
return
raw_hashes = set([item['data']['commit']
for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)])
hashes_to_delete = list(raw_hashes.difference(current_hashes))
to_process = []
for _hash in hashes_to_delete:
to_process.append(_hash)
if len(to_process) != MAX_BULK_UPDATE_SIZE:
continue
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
to_process = []
if to_process:
# delete documents from the raw index
self.remove_commits(to_process, ocean_backend.elastic.index_url,
'data.commit', self.perceval_backend.origin)
# delete documents from the enriched index
self.remove_commits(to_process, enrich_backend.elastic.index_url,
'hash', self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url),
self.perceval_backend.origin)
logger.debug("[update-items] %s commits deleted from %s with origin %s.",
len(hashes_to_delete), enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url),
self.perceval_backend.origin)
# update branch info
self.delete_commit_branches(enrich_backend)
self.add_commit_branches(git_repo, enrich_backend) | [
"def",
"update_items",
"(",
"self",
",",
"ocean_backend",
",",
"enrich_backend",
")",
":",
"fltr",
"=",
"{",
"'name'",
":",
"'origin'",
",",
"'value'",
":",
"[",
"self",
".",
"perceval_backend",
".",
"origin",
"]",
"}",
"logger",
".",
"debug",
"(",
"\"[update-items] Checking commits for %s.\"",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"git_repo",
"=",
"GitRepository",
"(",
"self",
".",
"perceval_backend",
".",
"uri",
",",
"self",
".",
"perceval_backend",
".",
"gitpath",
")",
"try",
":",
"current_hashes",
"=",
"set",
"(",
"[",
"commit",
"for",
"commit",
"in",
"git_repo",
".",
"rev_list",
"(",
")",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Skip updating branch info for repo %s, git rev-list command failed: %s\"",
",",
"git_repo",
".",
"uri",
",",
"e",
")",
"return",
"raw_hashes",
"=",
"set",
"(",
"[",
"item",
"[",
"'data'",
"]",
"[",
"'commit'",
"]",
"for",
"item",
"in",
"ocean_backend",
".",
"fetch",
"(",
"ignore_incremental",
"=",
"True",
",",
"_filter",
"=",
"fltr",
")",
"]",
")",
"hashes_to_delete",
"=",
"list",
"(",
"raw_hashes",
".",
"difference",
"(",
"current_hashes",
")",
")",
"to_process",
"=",
"[",
"]",
"for",
"_hash",
"in",
"hashes_to_delete",
":",
"to_process",
".",
"append",
"(",
"_hash",
")",
"if",
"len",
"(",
"to_process",
")",
"!=",
"MAX_BULK_UPDATE_SIZE",
":",
"continue",
"# delete documents from the raw index",
"self",
".",
"remove_commits",
"(",
"to_process",
",",
"ocean_backend",
".",
"elastic",
".",
"index_url",
",",
"'data.commit'",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"# delete documents from the enriched index",
"self",
".",
"remove_commits",
"(",
"to_process",
",",
"enrich_backend",
".",
"elastic",
".",
"index_url",
",",
"'hash'",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"to_process",
"=",
"[",
"]",
"if",
"to_process",
":",
"# delete documents from the raw index",
"self",
".",
"remove_commits",
"(",
"to_process",
",",
"ocean_backend",
".",
"elastic",
".",
"index_url",
",",
"'data.commit'",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"# delete documents from the enriched index",
"self",
".",
"remove_commits",
"(",
"to_process",
",",
"enrich_backend",
".",
"elastic",
".",
"index_url",
",",
"'hash'",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"logger",
".",
"debug",
"(",
"\"[update-items] %s commits deleted from %s with origin %s.\"",
",",
"len",
"(",
"hashes_to_delete",
")",
",",
"ocean_backend",
".",
"elastic",
".",
"anonymize_url",
"(",
"ocean_backend",
".",
"elastic",
".",
"index_url",
")",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"logger",
".",
"debug",
"(",
"\"[update-items] %s commits deleted from %s with origin %s.\"",
",",
"len",
"(",
"hashes_to_delete",
")",
",",
"enrich_backend",
".",
"elastic",
".",
"anonymize_url",
"(",
"enrich_backend",
".",
"elastic",
".",
"index_url",
")",
",",
"self",
".",
"perceval_backend",
".",
"origin",
")",
"# update branch info",
"self",
".",
"delete_commit_branches",
"(",
"enrich_backend",
")",
"self",
".",
"add_commit_branches",
"(",
"git_repo",
",",
"enrich_backend",
")"
]
| Retrieve the commits not present in the original repository and delete
the corresponding documents from the raw and enriched indexes | [
"Retrieve",
"the",
"commits",
"not",
"present",
"in",
"the",
"original",
"repository",
"and",
"delete",
"the",
"corresponding",
"documents",
"from",
"the",
"raw",
"and",
"enriched",
"indexes"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L668-L725 | train |
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | GitEnrich.add_commit_branches | def add_commit_branches(self, git_repo, enrich_backend):
"""Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend
"""
to_process = []
for hash, refname in git_repo._discover_refs(remote=True):
if not refname.startswith('refs/heads/'):
continue
commit_count = 0
branch_name = refname.replace('refs/heads/', '')
try:
commits = git_repo.rev_list([branch_name])
for commit in commits:
to_process.append(commit)
commit_count += 1
if commit_count == MAX_BULK_UPDATE_SIZE:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
# reset the counter
to_process = []
commit_count = 0
if commit_count:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
except Exception as e:
logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e)
return | python | def add_commit_branches(self, git_repo, enrich_backend):
"""Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend
"""
to_process = []
for hash, refname in git_repo._discover_refs(remote=True):
if not refname.startswith('refs/heads/'):
continue
commit_count = 0
branch_name = refname.replace('refs/heads/', '')
try:
commits = git_repo.rev_list([branch_name])
for commit in commits:
to_process.append(commit)
commit_count += 1
if commit_count == MAX_BULK_UPDATE_SIZE:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
# reset the counter
to_process = []
commit_count = 0
if commit_count:
self.__process_commits_in_branch(enrich_backend, branch_name, to_process)
except Exception as e:
logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e)
return | [
"def",
"add_commit_branches",
"(",
"self",
",",
"git_repo",
",",
"enrich_backend",
")",
":",
"to_process",
"=",
"[",
"]",
"for",
"hash",
",",
"refname",
"in",
"git_repo",
".",
"_discover_refs",
"(",
"remote",
"=",
"True",
")",
":",
"if",
"not",
"refname",
".",
"startswith",
"(",
"'refs/heads/'",
")",
":",
"continue",
"commit_count",
"=",
"0",
"branch_name",
"=",
"refname",
".",
"replace",
"(",
"'refs/heads/'",
",",
"''",
")",
"try",
":",
"commits",
"=",
"git_repo",
".",
"rev_list",
"(",
"[",
"branch_name",
"]",
")",
"for",
"commit",
"in",
"commits",
":",
"to_process",
".",
"append",
"(",
"commit",
")",
"commit_count",
"+=",
"1",
"if",
"commit_count",
"==",
"MAX_BULK_UPDATE_SIZE",
":",
"self",
".",
"__process_commits_in_branch",
"(",
"enrich_backend",
",",
"branch_name",
",",
"to_process",
")",
"# reset the counter",
"to_process",
"=",
"[",
"]",
"commit_count",
"=",
"0",
"if",
"commit_count",
":",
"self",
".",
"__process_commits_in_branch",
"(",
"enrich_backend",
",",
"branch_name",
",",
"to_process",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Skip adding branch info for repo %s due to %s\"",
",",
"git_repo",
".",
"uri",
",",
"e",
")",
"return"
]
| Add the information about branches to the documents representing commits in
the enriched index. Branches are obtained using the command `git ls-remote`,
then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and
used to update the corresponding items in the enriched index.
:param git_repo: GitRepository object
:param enrich_backend: the enrich backend | [
"Add",
"the",
"information",
"about",
"branches",
"to",
"the",
"documents",
"representing",
"commits",
"in",
"the",
"enriched",
"index",
".",
"Branches",
"are",
"obtained",
"using",
"the",
"command",
"git",
"ls",
"-",
"remote",
"then",
"for",
"each",
"branch",
"the",
"list",
"of",
"commits",
"is",
"retrieved",
"via",
"the",
"command",
"git",
"rev",
"-",
"list",
"branch",
"-",
"name",
"and",
"used",
"to",
"update",
"the",
"corresponding",
"items",
"in",
"the",
"enriched",
"index",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L770-L807 | train |
chaoss/grimoirelab-elk | utils/gelk_mapping.py | find_ds_mapping | def find_ds_mapping(data_source, es_major_version):
"""
Find the mapping given a perceval data source
:param data_source: name of the perceval data source
:param es_major_version: string with the major version for Elasticsearch
:return: a dict with the mappings (raw and enriched)
"""
mappings = {"raw": None,
"enriched": None}
# Backend connectors
connectors = get_connectors()
try:
raw_klass = connectors[data_source][1]
enrich_klass = connectors[data_source][2]
except KeyError:
print("Data source not found", data_source)
sys.exit(1)
# Mapping for raw index
backend = raw_klass(None)
if backend:
mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
mappings['raw'] = [mapping, find_general_mappings(es_major_version)]
# Mapping for enriched index
backend = enrich_klass(None)
if backend:
mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
mappings['enriched'] = [mapping, find_general_mappings(es_major_version)]
return mappings | python | def find_ds_mapping(data_source, es_major_version):
"""
Find the mapping given a perceval data source
:param data_source: name of the perceval data source
:param es_major_version: string with the major version for Elasticsearch
:return: a dict with the mappings (raw and enriched)
"""
mappings = {"raw": None,
"enriched": None}
# Backend connectors
connectors = get_connectors()
try:
raw_klass = connectors[data_source][1]
enrich_klass = connectors[data_source][2]
except KeyError:
print("Data source not found", data_source)
sys.exit(1)
# Mapping for raw index
backend = raw_klass(None)
if backend:
mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
mappings['raw'] = [mapping, find_general_mappings(es_major_version)]
# Mapping for enriched index
backend = enrich_klass(None)
if backend:
mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
mappings['enriched'] = [mapping, find_general_mappings(es_major_version)]
return mappings | [
"def",
"find_ds_mapping",
"(",
"data_source",
",",
"es_major_version",
")",
":",
"mappings",
"=",
"{",
"\"raw\"",
":",
"None",
",",
"\"enriched\"",
":",
"None",
"}",
"# Backend connectors",
"connectors",
"=",
"get_connectors",
"(",
")",
"try",
":",
"raw_klass",
"=",
"connectors",
"[",
"data_source",
"]",
"[",
"1",
"]",
"enrich_klass",
"=",
"connectors",
"[",
"data_source",
"]",
"[",
"2",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"Data source not found\"",
",",
"data_source",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Mapping for raw index",
"backend",
"=",
"raw_klass",
"(",
"None",
")",
"if",
"backend",
":",
"mapping",
"=",
"json",
".",
"loads",
"(",
"backend",
".",
"mapping",
".",
"get_elastic_mappings",
"(",
"es_major_version",
")",
"[",
"'items'",
"]",
")",
"mappings",
"[",
"'raw'",
"]",
"=",
"[",
"mapping",
",",
"find_general_mappings",
"(",
"es_major_version",
")",
"]",
"# Mapping for enriched index",
"backend",
"=",
"enrich_klass",
"(",
"None",
")",
"if",
"backend",
":",
"mapping",
"=",
"json",
".",
"loads",
"(",
"backend",
".",
"mapping",
".",
"get_elastic_mappings",
"(",
"es_major_version",
")",
"[",
"'items'",
"]",
")",
"mappings",
"[",
"'enriched'",
"]",
"=",
"[",
"mapping",
",",
"find_general_mappings",
"(",
"es_major_version",
")",
"]",
"return",
"mappings"
]
| Find the mapping given a perceval data source
:param data_source: name of the perceval data source
:param es_major_version: string with the major version for Elasticsearch
:return: a dict with the mappings (raw and enriched) | [
"Find",
"the",
"mapping",
"given",
"a",
"perceval",
"data",
"source"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/gelk_mapping.py#L95-L128 | train |
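find_ds_mapping relies on a connector registry: a dict maps each data-source name to a tuple whose second and third entries are the raw and enrich backend classes. The sketch below reproduces only that lookup step; the registry contents and the stub classes are simplified stand-ins, not the real grimoirelab-elk connectors.

import json
import sys

# Simplified stand-ins for raw/enrich backend classes; each exposes a `mapping`
# object whose get_elastic_mappings() returns {'items': <JSON string>}.
class MappingStub:
    def get_elastic_mappings(self, es_major_version):
        return {'items': json.dumps({"properties": {"uuid": {"type": "keyword"}}})}

class RawStub:
    def __init__(self, url):
        self.mapping = MappingStub()

class EnrichStub(RawStub):
    pass

# Toy registry shaped like get_connectors(): name -> (ocean, raw, enrich)
CONNECTORS = {"git": (None, RawStub, EnrichStub)}

def lookup_classes(data_source):
    try:
        _, raw_klass, enrich_klass = CONNECTORS[data_source][:3]
    except KeyError:
        print("Data source not found", data_source)
        sys.exit(1)
    return raw_klass, enrich_klass

raw_klass, enrich_klass = lookup_classes("git")
print(raw_klass(None).mapping.get_elastic_mappings("6")['items'])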
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_aoc.py | areas_of_code | def areas_of_code(git_enrich, in_conn, out_conn, block_size=100):
"""Build and index for areas of code from a given Perceval RAW index.
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index.
"""
aoc = AreasOfCode(in_connector=in_conn, out_connector=out_conn, block_size=block_size,
git_enrich=git_enrich)
ndocs = aoc.analyze()
return ndocs | python | def areas_of_code(git_enrich, in_conn, out_conn, block_size=100):
"""Build and index for areas of code from a given Perceval RAW index.
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index.
"""
aoc = AreasOfCode(in_connector=in_conn, out_connector=out_conn, block_size=block_size,
git_enrich=git_enrich)
ndocs = aoc.analyze()
return ndocs | [
"def",
"areas_of_code",
"(",
"git_enrich",
",",
"in_conn",
",",
"out_conn",
",",
"block_size",
"=",
"100",
")",
":",
"aoc",
"=",
"AreasOfCode",
"(",
"in_connector",
"=",
"in_conn",
",",
"out_connector",
"=",
"out_conn",
",",
"block_size",
"=",
"block_size",
",",
"git_enrich",
"=",
"git_enrich",
")",
"ndocs",
"=",
"aoc",
".",
"analyze",
"(",
")",
"return",
"ndocs"
]
| Build and index for areas of code from a given Perceval RAW index.
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index. | [
"Build",
"and",
"index",
"for",
"areas",
"of",
"code",
"from",
"a",
"given",
"Perceval",
"RAW",
"index",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_aoc.py#L214-L226 | train |
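areas_of_code simply wires an input and an output connector into an analysis object and returns the number of documents written. A rough sketch of that connector-driven block loop is shown below, using in-memory stand-ins in place of the real ESPandasConnector and AreasOfCode classes.

# Rough sketch of the read-in-blocks / process / write loop behind an analysis
# such as AreasOfCode. ListConnector is an in-memory stand-in, not the real class.

class ListConnector:
    def __init__(self, items=None):
        self.items = list(items or [])

    def read_block(self, size, offset):
        return self.items[offset:offset + size]

    def write(self, items):
        self.items.extend(items)


class BlockAnalysis:
    def __init__(self, in_connector, out_connector, block_size):
        self.in_connector = in_connector
        self.out_connector = out_connector
        self.block_size = block_size

    def process(self, items_block):
        # A real analysis (e.g. AreasOfCode) would eventize and enrich here.
        return [{"processed": item} for item in items_block]

    def analyze(self):
        written = 0
        offset = 0
        while True:
            block = self.in_connector.read_block(self.block_size, offset)
            if not block:
                break
            out_items = self.process(block)
            self.out_connector.write(out_items)
            written += len(out_items)
            offset += len(block)
        return written


in_conn = ListConnector(range(7))
out_conn = ListConnector()
print(BlockAnalysis(in_conn, out_conn, block_size=3).analyze())  # -> 7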
chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_aoc.py | AreasOfCode.process | def process(self, items_block):
"""Process items to add file related information.
Eventize items creating one new item per each file found in the commit (excluding
files with no actions performed on them). For each event, file path, file name,
path parts, file type and file extension are added as fields.
:param items_block: items to be processed. Expects to find ElasticSearch hits _source part only.
"""
logger.info(self.__log_prefix + " New commits: " + str(len(items_block)))
# Create events from commits
git_events = Git(items_block, self._git_enrich)
events_df = git_events.eventize(2)
logger.info(self.__log_prefix + " New events: " + str(len(events_df)))
if len(events_df) > 0:
# Filter information
data_filtered = FilterRows(events_df)
events_df = data_filtered.filter_(["filepath"], "-")
logger.info(self.__log_prefix + " New events filtered: " + str(len(events_df)))
events_df['message'] = events_df['message'].str.slice(stop=AreasOfCode.MESSAGE_MAX_SIZE)
logger.info(self.__log_prefix + " Remove message content")
# Add filetype info
enriched_filetype = FileType(events_df)
events_df = enriched_filetype.enrich('filepath')
logger.info(self.__log_prefix + " New Filetype events: " + str(len(events_df)))
# Split filepath info
enriched_filepath = FilePath(events_df)
events_df = enriched_filepath.enrich('filepath')
logger.info(self.__log_prefix + " New Filepath events: " + str(len(events_df)))
# Deal with surrogates
convert = ToUTF8(events_df)
events_df = convert.enrich(["owner"])
logger.info(self.__log_prefix + " Final new events: " + str(len(events_df)))
return self.ProcessResults(processed=len(events_df), out_items=events_df) | python | def process(self, items_block):
"""Process items to add file related information.
Eventize items creating one new item per each file found in the commit (excluding
files with no actions performed on them). For each event, file path, file name,
path parts, file type and file extension are added as fields.
:param items_block: items to be processed. Expects to find ElasticSearch hits _source part only.
"""
logger.info(self.__log_prefix + " New commits: " + str(len(items_block)))
# Create events from commits
git_events = Git(items_block, self._git_enrich)
events_df = git_events.eventize(2)
logger.info(self.__log_prefix + " New events: " + str(len(events_df)))
if len(events_df) > 0:
# Filter information
data_filtered = FilterRows(events_df)
events_df = data_filtered.filter_(["filepath"], "-")
logger.info(self.__log_prefix + " New events filtered: " + str(len(events_df)))
events_df['message'] = events_df['message'].str.slice(stop=AreasOfCode.MESSAGE_MAX_SIZE)
logger.info(self.__log_prefix + " Remove message content")
# Add filetype info
enriched_filetype = FileType(events_df)
events_df = enriched_filetype.enrich('filepath')
logger.info(self.__log_prefix + " New Filetype events: " + str(len(events_df)))
# Split filepath info
enriched_filepath = FilePath(events_df)
events_df = enriched_filepath.enrich('filepath')
logger.info(self.__log_prefix + " New Filepath events: " + str(len(events_df)))
# Deal with surrogates
convert = ToUTF8(events_df)
events_df = convert.enrich(["owner"])
logger.info(self.__log_prefix + " Final new events: " + str(len(events_df)))
return self.ProcessResults(processed=len(events_df), out_items=events_df) | [
"def",
"process",
"(",
"self",
",",
"items_block",
")",
":",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" New commits: \"",
"+",
"str",
"(",
"len",
"(",
"items_block",
")",
")",
")",
"# Create events from commits",
"git_events",
"=",
"Git",
"(",
"items_block",
",",
"self",
".",
"_git_enrich",
")",
"events_df",
"=",
"git_events",
".",
"eventize",
"(",
"2",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" New events: \"",
"+",
"str",
"(",
"len",
"(",
"events_df",
")",
")",
")",
"if",
"len",
"(",
"events_df",
")",
">",
"0",
":",
"# Filter information",
"data_filtered",
"=",
"FilterRows",
"(",
"events_df",
")",
"events_df",
"=",
"data_filtered",
".",
"filter_",
"(",
"[",
"\"filepath\"",
"]",
",",
"\"-\"",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" New events filtered: \"",
"+",
"str",
"(",
"len",
"(",
"events_df",
")",
")",
")",
"events_df",
"[",
"'message'",
"]",
"=",
"events_df",
"[",
"'message'",
"]",
".",
"str",
".",
"slice",
"(",
"stop",
"=",
"AreasOfCode",
".",
"MESSAGE_MAX_SIZE",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Remove message content\"",
")",
"# Add filetype info",
"enriched_filetype",
"=",
"FileType",
"(",
"events_df",
")",
"events_df",
"=",
"enriched_filetype",
".",
"enrich",
"(",
"'filepath'",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" New Filetype events: \"",
"+",
"str",
"(",
"len",
"(",
"events_df",
")",
")",
")",
"# Split filepath info",
"enriched_filepath",
"=",
"FilePath",
"(",
"events_df",
")",
"events_df",
"=",
"enriched_filepath",
".",
"enrich",
"(",
"'filepath'",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" New Filepath events: \"",
"+",
"str",
"(",
"len",
"(",
"events_df",
")",
")",
")",
"# Deal with surrogates",
"convert",
"=",
"ToUTF8",
"(",
"events_df",
")",
"events_df",
"=",
"convert",
".",
"enrich",
"(",
"[",
"\"owner\"",
"]",
")",
"logger",
".",
"info",
"(",
"self",
".",
"__log_prefix",
"+",
"\" Final new events: \"",
"+",
"str",
"(",
"len",
"(",
"events_df",
")",
")",
")",
"return",
"self",
".",
"ProcessResults",
"(",
"processed",
"=",
"len",
"(",
"events_df",
")",
",",
"out_items",
"=",
"events_df",
")"
]
| Process items to add file related information.
Eventize items creating one new item per each file found in the commit (excluding
files with no actions performed on them). For each event, file path, file name,
path parts, file type and file extension are added as fields.
:param items_block: items to be processed. Expects to find ElasticSearch hits _source part only. | [
"Process",
"items",
"to",
"add",
"file",
"related",
"information",
"."
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_aoc.py#L165-L211 | train |
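AreasOfCode.process turns each commit into one row per touched file and enriches it with path information. The pandas snippet below approximates those steps (drop '-' filepaths, truncate the message, derive file name, extension and path parts); the output column names and the 80-character cap are illustrative, not the exact fields produced by the real FileType/FilePath analyzers.

import os
import pandas as pd

MESSAGE_MAX_SIZE = 80  # illustrative cap; the real constant lives on AreasOfCode

events_df = pd.DataFrame({
    "filepath": ["src/app/main.py", "docs/README.md", "-"],
    "message": ["Fix crash on startup " * 20, "Update docs", "Merge commit"],
    "owner": ["alice", "bob", "carol"],
})

# Drop events whose filepath is the placeholder "-" (files with no action).
events_df = events_df[events_df["filepath"] != "-"].copy()

# Truncate long commit messages, as process() does with str.slice().
events_df["message"] = events_df["message"].str.slice(stop=MESSAGE_MAX_SIZE)

# Derive file name, extension and path parts from the filepath column.
events_df["file_name"] = events_df["filepath"].apply(os.path.basename)
events_df["file_ext"] = events_df["filepath"].apply(
    lambda p: os.path.splitext(p)[1].lstrip(".").lower())
events_df["file_dir_name"] = events_df["filepath"].apply(os.path.dirname)
events_df["file_path_list"] = events_df["filepath"].str.split("/")

print(events_df[["filepath", "file_name", "file_ext", "file_dir_name"]])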
chaoss/grimoirelab-elk | grimoire_elk/enriched/utils.py | get_time_diff_days | def get_time_diff_days(start, end):
''' Number of days between two dates in UTC format '''
if start is None or end is None:
return None
if type(start) is not datetime.datetime:
start = parser.parse(start).replace(tzinfo=None)
if type(end) is not datetime.datetime:
end = parser.parse(end).replace(tzinfo=None)
seconds_day = float(60 * 60 * 24)
diff_days = (end - start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days | python | def get_time_diff_days(start, end):
''' Number of days between two dates in UTC format '''
if start is None or end is None:
return None
if type(start) is not datetime.datetime:
start = parser.parse(start).replace(tzinfo=None)
if type(end) is not datetime.datetime:
end = parser.parse(end).replace(tzinfo=None)
seconds_day = float(60 * 60 * 24)
diff_days = (end - start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days | [
"def",
"get_time_diff_days",
"(",
"start",
",",
"end",
")",
":",
"if",
"start",
"is",
"None",
"or",
"end",
"is",
"None",
":",
"return",
"None",
"if",
"type",
"(",
"start",
")",
"is",
"not",
"datetime",
".",
"datetime",
":",
"start",
"=",
"parser",
".",
"parse",
"(",
"start",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"if",
"type",
"(",
"end",
")",
"is",
"not",
"datetime",
".",
"datetime",
":",
"end",
"=",
"parser",
".",
"parse",
"(",
"end",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"seconds_day",
"=",
"float",
"(",
"60",
"*",
"60",
"*",
"24",
")",
"diff_days",
"=",
"(",
"end",
"-",
"start",
")",
".",
"total_seconds",
"(",
")",
"/",
"seconds_day",
"diff_days",
"=",
"float",
"(",
"'%.2f'",
"%",
"diff_days",
")",
"return",
"diff_days"
]
| Number of days between two dates in UTC format | [
"Number",
"of",
"days",
"between",
"two",
"dates",
"in",
"UTC",
"format"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/utils.py#L91-L106 | train |
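A small standalone check of the day-difference arithmetic implemented by get_time_diff_days, using only the standard library (the original parses with dateutil): 2019-01-01 00:00 to 2019-01-03 12:00 is 2.5 days.

from datetime import datetime

def time_diff_days(start, end):
    """Days between two ISO-8601 timestamps, rounded to two decimals."""
    if start is None or end is None:
        return None
    if not isinstance(start, datetime):
        start = datetime.fromisoformat(start)
    if not isinstance(end, datetime):
        end = datetime.fromisoformat(end)
    seconds_per_day = 60 * 60 * 24
    return round((end - start).total_seconds() / seconds_per_day, 2)

print(time_diff_days("2019-01-01T00:00:00", "2019-01-03T12:00:00"))  # -> 2.5
print(time_diff_days(None, "2019-01-03T12:00:00"))                   # -> None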
chaoss/grimoirelab-elk | grimoire_elk/enriched/phabricator.py | PhabricatorEnrich.__fill_phab_ids | def __fill_phab_ids(self, item):
""" Get mappings between phab ids and names """
for p in item['projects']:
if p and 'name' in p and 'phid' in p:
self.phab_ids_names[p['phid']] = p['name']
if 'authorData' not in item['fields'] or not item['fields']['authorData']:
return
self.phab_ids_names[item['fields']['authorData']['phid']] = item['fields']['authorData']['userName']
if 'ownerData' in item['fields'] and item['fields']['ownerData']:
self.phab_ids_names[item['fields']['ownerData']['phid']] = item['fields']['ownerData']['userName']
if 'priority' in item['fields']:
val = item['fields']['priority']['value']
self.phab_ids_names[str(val)] = item['fields']['priority']['name']
for t in item['transactions']:
if 'authorData' in t and t['authorData'] and 'userName' in t['authorData']:
self.phab_ids_names[t['authorData']['phid']] = t['authorData']['userName']
elif t['authorData'] and 'name' in t['authorData']:
# Herald
self.phab_ids_names[t['authorData']['phid']] = t['authorData']['name'] | python | def __fill_phab_ids(self, item):
""" Get mappings between phab ids and names """
for p in item['projects']:
if p and 'name' in p and 'phid' in p:
self.phab_ids_names[p['phid']] = p['name']
if 'authorData' not in item['fields'] or not item['fields']['authorData']:
return
self.phab_ids_names[item['fields']['authorData']['phid']] = item['fields']['authorData']['userName']
if 'ownerData' in item['fields'] and item['fields']['ownerData']:
self.phab_ids_names[item['fields']['ownerData']['phid']] = item['fields']['ownerData']['userName']
if 'priority' in item['fields']:
val = item['fields']['priority']['value']
self.phab_ids_names[str(val)] = item['fields']['priority']['name']
for t in item['transactions']:
if 'authorData' in t and t['authorData'] and 'userName' in t['authorData']:
self.phab_ids_names[t['authorData']['phid']] = t['authorData']['userName']
elif t['authorData'] and 'name' in t['authorData']:
# Herald
self.phab_ids_names[t['authorData']['phid']] = t['authorData']['name'] | [
"def",
"__fill_phab_ids",
"(",
"self",
",",
"item",
")",
":",
"for",
"p",
"in",
"item",
"[",
"'projects'",
"]",
":",
"if",
"p",
"and",
"'name'",
"in",
"p",
"and",
"'phid'",
"in",
"p",
":",
"self",
".",
"phab_ids_names",
"[",
"p",
"[",
"'phid'",
"]",
"]",
"=",
"p",
"[",
"'name'",
"]",
"if",
"'authorData'",
"not",
"in",
"item",
"[",
"'fields'",
"]",
"or",
"not",
"item",
"[",
"'fields'",
"]",
"[",
"'authorData'",
"]",
":",
"return",
"self",
".",
"phab_ids_names",
"[",
"item",
"[",
"'fields'",
"]",
"[",
"'authorData'",
"]",
"[",
"'phid'",
"]",
"]",
"=",
"item",
"[",
"'fields'",
"]",
"[",
"'authorData'",
"]",
"[",
"'userName'",
"]",
"if",
"'ownerData'",
"in",
"item",
"[",
"'fields'",
"]",
"and",
"item",
"[",
"'fields'",
"]",
"[",
"'ownerData'",
"]",
":",
"self",
".",
"phab_ids_names",
"[",
"item",
"[",
"'fields'",
"]",
"[",
"'ownerData'",
"]",
"[",
"'phid'",
"]",
"]",
"=",
"item",
"[",
"'fields'",
"]",
"[",
"'ownerData'",
"]",
"[",
"'userName'",
"]",
"if",
"'priority'",
"in",
"item",
"[",
"'fields'",
"]",
":",
"val",
"=",
"item",
"[",
"'fields'",
"]",
"[",
"'priority'",
"]",
"[",
"'value'",
"]",
"self",
".",
"phab_ids_names",
"[",
"str",
"(",
"val",
")",
"]",
"=",
"item",
"[",
"'fields'",
"]",
"[",
"'priority'",
"]",
"[",
"'name'",
"]",
"for",
"t",
"in",
"item",
"[",
"'transactions'",
"]",
":",
"if",
"'authorData'",
"in",
"t",
"and",
"t",
"[",
"'authorData'",
"]",
"and",
"'userName'",
"in",
"t",
"[",
"'authorData'",
"]",
":",
"self",
".",
"phab_ids_names",
"[",
"t",
"[",
"'authorData'",
"]",
"[",
"'phid'",
"]",
"]",
"=",
"t",
"[",
"'authorData'",
"]",
"[",
"'userName'",
"]",
"elif",
"t",
"[",
"'authorData'",
"]",
"and",
"'name'",
"in",
"t",
"[",
"'authorData'",
"]",
":",
"# Herald",
"self",
".",
"phab_ids_names",
"[",
"t",
"[",
"'authorData'",
"]",
"[",
"'phid'",
"]",
"]",
"=",
"t",
"[",
"'authorData'",
"]",
"[",
"'name'",
"]"
]
| Get mappings between phab ids and names | [
"Get",
"mappings",
"between",
"phab",
"ids",
"and",
"names"
]
| 64e08b324b36d9f6909bf705145d6451c8d34e65 | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/phabricator.py#L229-L247 | train |
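A self-contained sketch of the phid-to-name bookkeeping performed by __fill_phab_ids, run against a toy task dict; the item below only mimics the relevant parts of a Phabricator maniphest record and is not real API output.

# Toy maniphest-like item, shaped only loosely like the data __fill_phab_ids reads.
item = {
    "projects": [{"phid": "PHID-PROJ-1", "name": "Infrastructure"}],
    "fields": {
        "authorData": {"phid": "PHID-USER-1", "userName": "alice"},
        "ownerData": {"phid": "PHID-USER-2", "userName": "bob"},
        "priority": {"value": 90, "name": "Needs Triage"},
    },
    "transactions": [
        {"authorData": {"phid": "PHID-USER-1", "userName": "alice"}},
        {"authorData": {"phid": "PHID-HRLD-1", "name": "Herald"}},  # rule, not a user
    ],
}

def fill_phab_ids(item):
    """Return a dict mapping phids (and priority values) to readable names."""
    ids = {}
    for project in item.get("projects", []):
        if project and "phid" in project and "name" in project:
            ids[project["phid"]] = project["name"]
    author = item["fields"].get("authorData")
    if not author:
        return ids
    ids[author["phid"]] = author["userName"]
    owner = item["fields"].get("ownerData")
    if owner:
        ids[owner["phid"]] = owner["userName"]
    if "priority" in item["fields"]:
        priority = item["fields"]["priority"]
        ids[str(priority["value"])] = priority["name"]
    for transaction in item.get("transactions", []):
        t_author = transaction.get("authorData")
        if t_author and "userName" in t_author:
            ids[t_author["phid"]] = t_author["userName"]
        elif t_author and "name" in t_author:
            ids[t_author["phid"]] = t_author["name"]  # Herald rules expose a name only
    return ids

print(fill_phab_ids(item))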