| column | type | notes |
|---|---|---|
| repo | string | lengths 7-55 |
| path | string | lengths 4-127 |
| func_name | string | lengths 1-88 |
| original_string | string | lengths 75-19.8k |
| language | string | 1 class (`python` in every row) |
| code | string | lengths 75-19.8k; identical to `original_string` in these rows |
| code_tokens | list | token-by-token split of `code` |
| docstring | string | lengths 3-17.3k |
| docstring_tokens | list | token-by-token split of `docstring` |
| sha | string | length 40 (commit hash) |
| url | string | lengths 87-242 |
| partition | string | 1 class (`train` in every row) |

repo: hbldh/dlxsudoku
path: dlxsudoku/sudoku.py
func_name: Sudoku.set_cell
language: python
code:
def set_cell(self, i, j, value):
    """Set a cell's value, with a series of safety checks

    :param i: The row number
    :type i: int
    :param j: The column number
    :type j: int
    :param value: The value to set
    :type value: int
    :raises: :py:class:`dlxsudoku.exceptions.SudokuHasNoSolutionError`
    """
    bool_tests = [
        value in self._possibles[i][j],
        value in self._poss_rows[i],
        value in self._poss_cols[j],
        value in self._poss_box[(i // self.order) * self.order + (j // self.order)],
        value not in self.row(i),
        value not in self.col(j),
        value not in self.box(i, j)
    ]
    if all(bool_tests):
        self[i][j] = value
    else:
        raise SudokuHasNoSolutionError("This value cannot be set here!")

sha: 8d774e0883eb615533d04f07e58a95db716226e0
url: https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L177-L202
partition: train
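
For context, here is a minimal, self-contained sketch of the same safety idea, checking a candidate against its row, column and box before writing it. The grid is a plain list of lists and `can_place` is an illustrative helper, not part of dlxsudoku:

```python
def can_place(grid, i, j, value, order=3):
    """Return True if `value` can legally go at (i, j) in `grid` (0 = empty)."""
    # Reject a value already present in the row or the column.
    if value in grid[i] or value in (row[j] for row in grid):
        return False
    # Reject a value already present in the order x order box around (i, j).
    bi, bj = (i // order) * order, (j // order) * order
    box = (grid[r][c] for r in range(bi, bi + order) for c in range(bj, bj + order))
    return value not in box

grid = [[0] * 9 for _ in range(9)]
grid[0][0] = 5
print(can_place(grid, 0, 8, 5))  # False: 5 is already in row 0
print(can_place(grid, 4, 4, 5))  # True
```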

repo: hbldh/dlxsudoku
path: dlxsudoku/sudoku.py
func_name: Sudoku.solve
language: python
code:
def solve(self, verbose=False, allow_brute_force=True):
    """Solve the Sudoku.

    :param verbose: If the steps used for solving the Sudoku
        should be printed. Default is `False`
    :type verbose: bool
    :param allow_brute_force: If Dancing Links Brute Force method
        should be used if necessary. Default is `True`
    :type allow_brute_force: bool
    """
    while not self.is_solved:
        # Update possibles arrays.
        self._update()
        # See if any position can be singled out.
        singles_found = False or self._fill_naked_singles() or self._fill_hidden_singles()
        # If singles_found is False, then no new uniquely defined cells were found
        # and this solver cannot solve the Sudoku. We either use brute force or throw an error.
        # Else, if singles_found is True, run another iteration to see if new singles have shown up.
        if not singles_found:
            if allow_brute_force:
                solution = None
                try:
                    dlxs = DancingLinksSolver(copy.deepcopy(self._matrix))
                    solutions = dlxs.solve()
                    solution = next(solutions)
                    more_solutions = next(solutions)
                except StopIteration as e:
                    if solution is not None:
                        self._matrix = solution
                    else:
                        raise SudokuHasNoSolutionError("Dancing Links solver could not find any solution.")
                except Exception as e:
                    raise SudokuHasNoSolutionError("Brute Force method failed.")
                else:
                    # We end up here if the second `next(solutions)` works,
                    # i.e. if multiple solutions exist.
                    raise SudokuHasMultipleSolutionsError("This Sudoku has multiple solutions!")
                self.solution_steps.append("BRUTE FORCE - Dancing Links")
                break
            else:
                print(self)
                raise SudokuTooDifficultError("This Sudoku requires more advanced methods!")
    if verbose:
        print("Sudoku solved in {0} iterations!\n{1}".format(len(self.solution_steps), self))
        for step in self.solution_steps:
            print(step)

sha: 8d774e0883eb615533d04f07e58a95db716226e0
url: https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L209-L257
partition: train
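
A detail worth noting in `solve`: calling `next(solutions)` twice distinguishes no solution, exactly one, and more than one without materializing the whole generator. A stand-alone sketch of that try/except/else pattern (the `classify` helper is illustrative, with a plain iterator standing in for the Dancing Links solver):

```python
def classify(solutions):
    # solutions is any iterator; pull at most two items from it.
    first = None
    try:
        first = next(solutions)
        next(solutions)              # succeeds only if a second solution exists
    except StopIteration:
        return "unique" if first is not None else "none"
    else:
        return "multiple"

print(classify(iter([])))            # none
print(classify(iter(["s1"])))        # unique
print(classify(iter(["s1", "s2"])))  # multiple
```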

repo: hbldh/dlxsudoku
path: dlxsudoku/sudoku.py
func_name: Sudoku._update
language: python
code:
def _update(self):
    """Calculate remaining values for each row, column, box and finally cell."""
    # Update possible values in each row, column and box.
    for i, (row, col, box) in enumerate(zip(self.row_iter(), self.col_iter(), self.box_iter())):
        self._poss_rows[i] = set(self._values).difference(set(row))
        self._poss_cols[i] = set(self._values).difference(set(col))
        self._poss_box[i] = set(self._values).difference(set(box))
    # Iterate over the entire Sudoku and combine information about possible values
    # from rows, columns and boxes to get a set of possible values for each cell.
    for i in utils.range_(self.side):
        self._possibles[i] = {}
        for j in utils.range_(self.side):
            self._possibles[i][j] = set()
            if self[i][j] > 0:
                continue
            this_box_index = ((i // self.order) * self.order) + (j // self.order)
            self._possibles[i][j] = self._poss_rows[i].intersection(
                self._poss_cols[j]).intersection(self._poss_box[this_box_index])

sha: 8d774e0883eb615533d04f07e58a95db716226e0
url: https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L259-L277
partition: train
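
The per-cell candidate set is the intersection of the values still missing from the cell's row, column and box, which is exactly what the final statement above computes. A self-contained sketch on an order-2 (4x4) grid, with illustrative names:

```python
ORDER, SIDE = 2, 4
VALUES = set(range(1, SIDE + 1))

grid = [
    [1, 0, 3, 4],
    [3, 4, 0, 2],
    [0, 1, 4, 3],
    [4, 0, 2, 1],
]

def candidates(grid, i, j):
    row = set(grid[i])
    col = {grid[r][j] for r in range(SIDE)}
    bi, bj = (i // ORDER) * ORDER, (j // ORDER) * ORDER
    box = {grid[r][c] for r in range(bi, bi + ORDER) for c in range(bj, bj + ORDER)}
    # Values missing from the row AND the column AND the box.
    return (VALUES - row) & (VALUES - col) & (VALUES - box)

print(candidates(grid, 0, 1))  # {2}: a naked single, ready for set_cell
```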

repo: hbldh/dlxsudoku
path: dlxsudoku/sudoku.py
func_name: Sudoku._fill_naked_singles
language: python
code:
def _fill_naked_singles(self):
    """Look for naked singles, i.e. cells with only one possible value.

    :return: If any Naked Single has been found.
    :rtype: bool
    """
    simple_found = False
    for i in utils.range_(self.side):
        for j in utils.range_(self.side):
            if self[i][j] > 0:
                continue
            p = self._possibles[i][j]
            if len(p) == 1:
                self.set_cell(i, j, list(p)[0])
                self.solution_steps.append(self._format_step("NAKED", (i, j), self[i][j]))
                simple_found = True
            elif len(p) == 0:
                raise SudokuHasNoSolutionError("Error made! No possible value for ({0},{1})!".format(i + 1, j + 1))
    return simple_found

sha: 8d774e0883eb615533d04f07e58a95db716226e0
url: https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L279-L299
partition: train

repo: hbldh/dlxsudoku
path: dlxsudoku/sudoku.py
func_name: Sudoku._fill_hidden_singles
language: python
code:
def _fill_hidden_singles(self):
    """Look for hidden singles, i.e. cells with only one unique possible value in row, column or box.

    :return: If any Hidden Single has been found.
    :rtype: bool
    """
    for i in utils.range_(self.side):
        box_i = (i // self.order) * self.order
        for j in utils.range_(self.side):
            box_j = (j // self.order) * self.order
            # Skip if this cell is determined already.
            if self[i][j] > 0:
                continue
            # Look for hidden single in rows.
            p = self._possibles[i][j]
            for k in utils.range_(self.side):
                if k == j:
                    continue
                p = p.difference(self._possibles[i][k])
            if len(p) == 1:
                # Found a hidden single in a row!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-ROW", (i, j), self[i][j]))
                return True
            # Look for hidden single in columns
            p = self._possibles[i][j]
            for k in utils.range_(self.side):
                if k == i:
                    continue
                p = p.difference(self._possibles[k][j])
            if len(p) == 1:
                # Found a hidden single in a column!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-COL", (i, j), self[i][j]))
                return True
            # Look for hidden single in box
            p = self._possibles[i][j]
            for k in utils.range_(box_i, box_i + self.order):
                for kk in utils.range_(box_j, box_j + self.order):
                    if k == i and kk == j:
                        continue
                    p = p.difference(self._possibles[k][kk])
            if len(p) == 1:
                # Found a hidden single in a box!
                self.set_cell(i, j, p.pop())
                self.solution_steps.append(self._format_step("HIDDEN-BOX", (i, j), self[i][j]))
                return True
    return False

sha: 8d774e0883eb615533d04f07e58a95db716226e0
url: https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L301-L353
partition: train
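
A hidden single is a candidate that no other cell in the same unit can take; the loops above find it by subtracting every peer's candidate set from the cell's own. The same subtraction on a hand-written row of candidate sets (illustrative data, not dlxsudoku internals):

```python
# Candidate sets for the four cells of one row (cell index -> candidates).
row_candidates = {0: {2, 4}, 1: {2, 3, 4}, 2: {2, 4}, 3: {2, 4}}

def hidden_single(candidates, j):
    # Start from this cell's candidates and remove everything a peer could take.
    p = set(candidates[j])
    for k, peer in candidates.items():
        if k != j:
            p -= peer
    return p.pop() if len(p) == 1 else None

for j in sorted(row_candidates):
    print(j, hidden_single(row_candidates, j))
# Only cell 1 keeps a leftover value: 3 fits nowhere else in the row.
```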

repo: Capitains/MyCapytain
path: MyCapytain/resources/collections/dts/_base.py
func_name: DtsCollection.parse
language: python
code:
def parse(cls, resource, direction="children", **additional_parameters) -> "DtsCollection":
    """ Given a dict representation of a json object, generate a DTS Collection

    :param resource:
    :type resource: dict
    :param direction: Direction of the hydra:members value
    :return: DTSCollection parsed
    :rtype: DtsCollection
    """
    data = jsonld.expand(resource)
    if len(data) == 0:
        raise JsonLdCollectionMissing("Missing collection in JSON")
    data = data[0]
    obj = cls(
        identifier=resource["@id"],
        **additional_parameters
    )
    obj._parse_metadata(data)
    obj._parse_members(data, direction=direction, **additional_parameters)
    return obj

sha: b11bbf6b6ae141fc02be70471e3fbf6907be6593
url: https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/collections/dts/_base.py#L72-L95
partition: train
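
`jsonld.expand` rewrites a JSON-LD document into expanded form, where every term becomes a full IRI and every value an explicit object, which is why the parser can rely on a uniform shape and treat an empty result as a missing collection. A small illustration, assuming the `pyld` package (which exposes a `jsonld.expand` of this shape) and a toy vocabulary:

```python
from pyld import jsonld

doc = {
    "@context": {"title": "http://purl.org/dc/terms/title"},
    "@id": "urn:example:collection1",
    "title": "My collection",
}

expanded = jsonld.expand(doc)
print(expanded)
# [{'@id': 'urn:example:collection1',
#   'http://purl.org/dc/terms/title': [{'@value': 'My collection'}]}]
```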

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: Visitor._filldown
language: python
code:
def _filldown(self, lineno):
    """
    Copy current_context into `lines` up until lineno
    """
    if self.line > lineno:
        # XXX decorated functions make us jump backwards.
        # understand this more
        return
    self.lines.extend(
        self.current_context for _ in range(self.line, lineno))
    self.line = lineno

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L50-L61
partition: train

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: Visitor._add_section
language: python
code:
def _add_section(self, node):
    """
    Register the current node as a new context block
    """
    self._filldown(node.lineno)
    # push a new context onto stack
    self.context.append(node.name)
    self._update_current_context()
    for _ in map(self.visit, iter_child_nodes(node)):
        pass
    # restore current context
    self.context.pop()
    self._update_current_context()

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L63-L78
partition: train
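
The push/visit-children/pop dance above is the standard way to attach a dotted context name to every node in a module. A self-contained sketch of the same idea on top of `ast.NodeVisitor` (the class and names are illustrative, not smother's API):

```python
import ast

class ContextVisitor(ast.NodeVisitor):
    def __init__(self):
        self.stack = []

    def visit_FunctionDef(self, node):
        # Push the name, report the context, recurse, then pop to restore it.
        self.stack.append(node.name)
        print(node.lineno, ".".join(self.stack))
        self.generic_visit(node)
        self.stack.pop()

    visit_ClassDef = visit_FunctionDef

source = """\
class A:
    def f(self):
        pass

def g():
    pass
"""
ContextVisitor().visit(ast.parse(source))
# 1 A
# 2 A.f
# 5 g
```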

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: PythonFile._module_name
language: python
code:
def _module_name(filename):
    """
    Try to find a module name for a file path
    by stripping off a prefix found in sys.path.
    """
    absfile = os.path.abspath(filename)
    match = filename
    for base in [''] + sys.path:
        base = os.path.abspath(base)
        if absfile.startswith(base):
            match = absfile[len(base):]
            break
    return SUFFIX_RE.sub('', match).lstrip('/').replace('/', '.')

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L140-L155
partition: train

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: PythonFile.from_modulename
language: python
code:
def from_modulename(cls, module_name):
    """
    Build a PythonFile given a dotted module name like a.b.c
    """
    # XXX make this more robust (pyc files? zip archives? etc)
    slug = module_name.replace('.', '/')
    paths = [slug + '.py', slug + '/__init__.py']
    # always search from current directory
    for base in [''] + sys.path:
        for path in paths:
            fullpath = os.path.join(base, path)
            if os.path.exists(fullpath):
                return cls(fullpath, prefix=module_name)
    else:
        raise ValueError("Module not found: %s" % module_name)

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L158-L173
partition: train
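
The `for ... else` at the end is easy to misread: the `else` suite runs only when the outer loop finishes without being exited early. Because the loop body returns on success, the `raise` is reached exactly when no candidate path exists. A two-case illustration:

```python
def find(needle, haystack):
    for item in haystack:
        if item == needle:
            return "found"
    else:
        # Runs only if the loop completed without break/return.
        return "not found"

print(find(2, [1, 2, 3]))  # found
print(find(9, [1, 2, 3]))  # not found
```

With a `return` inside the loop, the `else` is equivalent to plain code placed after the loop; it mainly signals intent.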

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: PythonFile.context_range
language: python
code:
def context_range(self, context):
    """
    Return the 1-offset, right-open range of lines spanned by
    a particular context name.

    Parameters
    ----------
    context : str

    Raises
    ------
    ValueError, if context is not present in the file.
    """
    if not context.startswith(self.prefix):
        context = self.prefix + '.' + context

    lo = hi = None
    for idx, line_context in enumerate(self.lines, 1):
        # context is hierarchical -- context spans itself
        # and any suffix.
        if line_context.startswith(context):
            lo = lo or idx
            hi = idx

    if lo is None:
        raise ValueError("Context %s does not exist in file %s" %
                         (context, self.filename))

    return lo, hi + 1

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L179-L208
partition: train

repo: ChrisBeaumont/smother
path: smother/python.py
func_name: PythonFile.context
language: python
code:
def context(self, line):
    """
    Return the context for a given 1-offset line number.
    """
    # XXX due to a limitation in Visitor,
    # non-python code after the last python code
    # in a file is not added to self.lines, so we
    # have to guard against IndexErrors.
    idx = line - 1
    if idx >= len(self.lines):
        return self.prefix
    return self.lines[idx]

sha: 65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
url: https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L210-L221
partition: train

repo: infothrill/python-launchd
path: launchd/plist.py
func_name: write
language: python
code:
def write(label, plist, scope=USER):
    '''
    Writes the given property list to the appropriate file on disk and returns
    the absolute filename.

    :param plist: dict
    :param label: string
    :param scope: oneOf(USER, USER_ADMIN, DAEMON_ADMIN, USER_OS, DAEMON_OS)
    '''
    fname = compute_filename(label, scope)
    with open(fname, "wb") as f:
        plistlib.writePlist(plist, f)
    return fname

sha: 2cd50579e808851b116f5a26f9b871a32b65ce0e
url: https://github.com/infothrill/python-launchd/blob/2cd50579e808851b116f5a26f9b871a32b65ce0e/launchd/plist.py#L49-L61
partition: train
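
`plistlib.writePlist` is the legacy spelling: it was deprecated in Python 3.4 and removed in 3.9. On current Python the equivalent calls are `plistlib.dump` and `plistlib.load`. A minimal sketch with a hypothetical path:

```python
import plistlib

plist = {"Label": "com.example.agent", "RunAtLoad": True}

# plistlib.dump(value, fp) replaces the removed writePlist (Python 3.4+).
with open("/tmp/com.example.agent.plist", "wb") as f:
    plistlib.dump(plist, f)

with open("/tmp/com.example.agent.plist", "rb") as f:
    print(plistlib.load(f)["Label"])  # com.example.agent
```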

repo: SHDShim/pytheos
path: pytheos/eqn_therm.py
func_name: alphakt_pth
language: python
code:
def alphakt_pth(v, temp, v0, alpha0, k0, n, z, t_ref=300.,
                three_r=3. * constants.R):
    """
    calculate thermal pressure from thermal expansion and bulk modulus

    :param v: unit-cell volume in A^3
    :param temp: temperature in K
    :param v0: unit-cell volume in A^3 at 1 bar
    :param alpha0: thermal expansion parameter at 1 bar in K-1
    :param k0: bulk modulus in GPa
    :param n: number of atoms in a formula unit
    :param z: number of formula units in a unit cell
    :param t_ref: reference temperature
    :param three_r: 3R in case adjustment is needed
    :return: thermal pressure in GPa
    """
    return alpha0 * k0 * (temp - t_ref)

sha: be079624405e92fbec60c5ead253eb5917e55237
url: https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm.py#L5-L21
partition: train
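
The body is the classical approximation P_th = alpha0 * K0 * (T - T_ref); the volume and composition arguments (`v`, `v0`, `n`, `z`, `three_r`) go unused here, presumably kept so the module's thermal-pressure models share one signature. A worked number with illustrative, MgO-like values:

```python
alpha0 = 3.0e-5              # thermal expansion at 1 bar, K^-1 (illustrative)
k0 = 160.0                   # bulk modulus, GPa (illustrative)
temp, t_ref = 2000.0, 300.0  # temperature and reference temperature, K

p_th = alpha0 * k0 * (temp - t_ref)
print(round(p_th, 2))  # 8.16 GPa of thermal pressure
```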

repo: aiidateam/aiida-codtools
path: aiida_codtools/parsers/cif_cod_deposit.py
func_name: CifCodDepositParser._get_output_nodes
language: python
code:
def _get_output_nodes(self, output_path, error_path):
    """
    Extracts output nodes from the standard output and standard error files
    """
    status = cod_deposition_states.UNKNOWN
    messages = []

    if output_path is not None:
        content = None
        with open(output_path) as f:
            content = f.read()
        status, message = CifCodDepositParser._deposit_result(content)
        messages.extend(message.split('\n'))

    if error_path is not None:
        with open(error_path) as f:
            content = f.readlines()
        lines = [x.strip('\n') for x in content]
        messages.extend(lines)

    parameters = {'output_messages': messages, 'status': status}
    output_nodes = []
    output_nodes.append(('messages', Dict(dict=parameters)))

    if status == cod_deposition_states.SUCCESS:
        return True, output_nodes

    return False, output_nodes

sha: da5e4259b7a2e86cf0cc3f997e11dd36d445fa94
url: https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/parsers/cif_cod_deposit.py#L35-L63
partition: train

repo: chaoss/grimoirelab-cereslib
path: cereslib/dfutils/filter.py
func_name: FilterRows.filter_
language: python
code:
def filter_(self, columns, value):
    """ This method filters out the rows where 'value'
    is found in any of the 'columns'.

    :param columns: list of strings
    :param value: any type
    :returns: filtered dataframe
    :rtype: pandas.DataFrame
    """
    for column in columns:
        if column not in self.data.columns:
            raise ValueError("Column %s not in DataFrame columns: %s" % (column, list(self.data)))

    for column in columns:
        # Filtering on empty data series doesn't make sense at all and also would raise an error
        column_len = len(self.data[column])
        if column_len > 0 and column_len != self.data[column].isnull().sum():
            self.data = self.data[self.data[column] != value]

    return self.data

sha: 5110e6ca490a4f24bec3124286ebf51fd4e08bdd
url: https://github.com/chaoss/grimoirelab-cereslib/blob/5110e6ca490a4f24bec3124286ebf51fd4e08bdd/cereslib/dfutils/filter.py#L54-L75
partition: train
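
The working part is the boolean-mask reassignment `self.data = self.data[self.data[column] != value]`, applied once per column; the length/null-count guard skips columns that are empty or entirely null. A self-contained sketch of the masking step, with illustrative data:

```python
import pandas as pd

df = pd.DataFrame({
    "author": ["alice", "bot", "carol"],
    "reviewer": ["bot", "dave", "erin"],
})

# Drop a row whenever 'bot' appears in either column.
for column in ["author", "reviewer"]:
    df = df[df[column] != "bot"]

print(df)
#   author reviewer
# 2  carol     erin
```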

repo: crdoconnor/commandlib
path: commandlib/utils.py
func_name: _check_directory
language: python
code:
def _check_directory(directory):
    """Raise exception if directory does not exist."""
    if directory is not None:
        if not exists(directory):
            raise CommandError(
                "Cannot run command - directory {0} does not exist".format(directory)
            )

        if not isdir(directory):
            raise CommandError(
                "Cannot run command - specified directory {0} is not a directory.".format(
                    directory
                )
            )

sha: b630364fd7b0d189b388e22a7f43235d182e12e4
url: https://github.com/crdoconnor/commandlib/blob/b630364fd7b0d189b388e22a7f43235d182e12e4/commandlib/utils.py#L5-L18
partition: train

repo: totalgood/twip
path: twip/features.py
func_name: load_tweets
language: python
code:
def load_tweets(filename='tweets.zip'):
    r"""Extract the cached tweets "database" if necessary and load + parse the json.
>>> js = load_tweets()
>>> len(js)
8000
>>> js[0].keys()
[u'contributors',
u'truncated',
u'text',
u'is_quote_status',
u'in_reply_to_status_id',
u'id',
u'favorite_count',
u'source',
u'retweeted',
u'coordinates',
u'entities',
u'in_reply_to_screen_name',
u'id_str',
u'retweet_count',
u'in_reply_to_user_id',
u'favorited',
u'user',
u'geo',
u'in_reply_to_user_id_str',
u'possibly_sensitive',
u'lang',
u'created_at',
u'in_reply_to_status_id_str',
u'place',
u'metadata']
>>> print(json.dumps((obj for obj in js if obj['geo'] is not None).next(), indent=4))
{
"contributors": null,
"truncated": false,
"text": "See our latest #Sacramento, CA #job and click to apply: Python Software Engineer - https://t.co/yimTIlISE0 #IT #Hiring #CareerArc",
"is_quote_status": false,
"in_reply_to_status_id": null,
"id": 674998672136929280,
"favorite_count": 0,
"source": "<a href=\"http://www.tweetmyjobs.com\" rel=\"nofollow\">TweetMyJOBS</a>",
"retweeted": false,
"coordinates": {
"type": "Point",
"coordinates": [
-121.4399041,
38.5963157
]
},
"entities": {
"symbols": [],
"user_mentions": [],
"hashtags": [
{
"indices": [
15,
26
],
"text": "Sacramento"
},
{
"indices": [
31,
35
],
"text": "job"
},
{
"indices": [
107,
110
],
"text": "IT"
},
{
"indices": [
111,
118
],
"text": "Hiring"
},
{
"indices": [
119,
129
],
"text": "CareerArc"
}
],
"urls": [
{
"url": "https://t.co/yimTIlISE0",
"indices": [
83,
106
],
"expanded_url": "http://bit.ly/1OTNflo",
"display_url": "bit.ly/1OTNflo"
}
]
},
"in_reply_to_screen_name": null,
"id_str": "674998672136929280",
"retweet_count": 0,
"in_reply_to_user_id": null,
"favorited": false,
"user": {
"follow_request_sent": null,
"has_extended_profile": false,
"profile_use_background_image": true,
"id": 22634351,
"verified": false,
"profile_text_color": "000000",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"profile_sidebar_fill_color": "407DB0",
"is_translator": false,
"geo_enabled": true,
"entities": {
"url": {
"urls": [
{
"url": "https://t.co/DByWt45HZj",
"indices": [
0,
23
],
"expanded_url": "http://www.careerarc.com/job-seeker",
"display_url": "careerarc.com/job-seeker"
}
]
},
"description": {
"urls": []
}
},
"followers_count": 452,
"protected": false,
"location": "Sacramento, CA",
"default_profile_image": false,
"id_str": "22634351",
"lang": "en",
"utc_offset": -18000,
"statuses_count": 157,
"description": "Follow this account for geo-targeted Software Dev.
- General/IT job tweets in Sacramento, CA. Need help? Tweet us at @CareerArc!",
"friends_count": 326,
"profile_link_color": "4A913C",
"profile_image_url": "http://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"notifications": null,
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"profile_background_color": "253956",
"profile_banner_url": "https://pbs.twimg.com/profile_banners/22634351/1448587317",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"name": "TMJ-SAC IT Jobs",
"is_translation_enabled": false,
"profile_background_tile": false,
"favourites_count": 0,
"screen_name": "tmj_sac_it",
"url": "https://t.co/DByWt45HZj",
"created_at": "Tue Mar 03 15:28:22 +0000 2009",
"contributors_enabled": false,
"time_zone": "Eastern Time (US & Canada)",
"profile_sidebar_border_color": "000000",
"default_profile": false,
"following": null,
"listed_count": 36
},
"geo": {
"type": "Point",
"coordinates": [
38.5963157,
-121.4399041
]
},
"in_reply_to_user_id_str": null,
"possibly_sensitive": false,
"lang": "en",
"created_at": "Thu Dec 10 17:06:38 +0000 2015",
"in_reply_to_status_id_str": null,
"place": {
"country_code": "US",
"url": "https://api.twitter.com/1.1/geo/id/b71fac2ee9792cbe.json",
"country": "United States",
"place_type": "city",
"bounding_box": {
"type": "Polygon",
"coordinates": [
[
[
-121.576613,
38.43792
],
[
-121.362715,
38.43792
],
[
-121.362715,
38.685512
],
[
-121.576613,
38.685512
]
]
]
},
"contained_within": [],
"full_name": "Sacramento, CA",
"attributes": {},
"id": "b71fac2ee9792cbe",
"name": "Sacramento"
},
"metadata": {
"iso_language_code": "en",
"result_type": "recent"
}
}
"""
    basename, ext = os.path.splitext(filename)
    json_file = basename + '.json'
    json_path = os.path.join(DATA_PATH, json_file)
    zip_path = os.path.join(DATA_PATH, basename + '.zip')
    if not os.path.isfile(json_path):
        zf = ZipFile(zip_path, 'r')
        zf.extract(json_file, DATA_PATH)
    # 'rUb' in the original source is not a valid mode on Python 3; 'rb' keeps the behaviour.
    with open(json_path, 'rb') as f:
        return json.load(f)
|
python
|
def load_tweets(filename='tweets.zip'):
r"""Extract the cached tweets "database" if necessary and load + parse the json.
>>> js = load_tweets()
>>> len(js)
8000
>>> js[0].keys()
[u'contributors',
u'truncated',
u'text',
u'is_quote_status',
u'in_reply_to_status_id',
u'id',
u'favorite_count',
u'source',
u'retweeted',
u'coordinates',
u'entities',
u'in_reply_to_screen_name',
u'id_str',
u'retweet_count',
u'in_reply_to_user_id',
u'favorited',
u'user',
u'geo',
u'in_reply_to_user_id_str',
u'possibly_sensitive',
u'lang',
u'created_at',
u'in_reply_to_status_id_str',
u'place',
u'metadata']
>>> print(json.dumps((obj for obj in js if obj['geo'] is not None).next(), indent=4))
{
"contributors": null,
"truncated": false,
"text": "See our latest #Sacramento, CA #job and click to apply: Python Software Engineer - https://t.co/yimTIlISE0 #IT #Hiring #CareerArc",
"is_quote_status": false,
"in_reply_to_status_id": null,
"id": 674998672136929280,
"favorite_count": 0,
"source": "<a href=\"http://www.tweetmyjobs.com\" rel=\"nofollow\">TweetMyJOBS</a>",
"retweeted": false,
"coordinates": {
"type": "Point",
"coordinates": [
-121.4399041,
38.5963157
]
},
"entities": {
"symbols": [],
"user_mentions": [],
"hashtags": [
{
"indices": [
15,
26
],
"text": "Sacramento"
},
{
"indices": [
31,
35
],
"text": "job"
},
{
"indices": [
107,
110
],
"text": "IT"
},
{
"indices": [
111,
118
],
"text": "Hiring"
},
{
"indices": [
119,
129
],
"text": "CareerArc"
}
],
"urls": [
{
"url": "https://t.co/yimTIlISE0",
"indices": [
83,
106
],
"expanded_url": "http://bit.ly/1OTNflo",
"display_url": "bit.ly/1OTNflo"
}
]
},
"in_reply_to_screen_name": null,
"id_str": "674998672136929280",
"retweet_count": 0,
"in_reply_to_user_id": null,
"favorited": false,
"user": {
"follow_request_sent": null,
"has_extended_profile": false,
"profile_use_background_image": true,
"id": 22634351,
"verified": false,
"profile_text_color": "000000",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"profile_sidebar_fill_color": "407DB0",
"is_translator": false,
"geo_enabled": true,
"entities": {
"url": {
"urls": [
{
"url": "https://t.co/DByWt45HZj",
"indices": [
0,
23
],
"expanded_url": "http://www.careerarc.com/job-seeker",
"display_url": "careerarc.com/job-seeker"
}
]
},
"description": {
"urls": []
}
},
"followers_count": 452,
"protected": false,
"location": "Sacramento, CA",
"default_profile_image": false,
"id_str": "22634351",
"lang": "en",
"utc_offset": -18000,
"statuses_count": 157,
"description": "Follow this account for geo-targeted Software Dev.
- General/IT job tweets in Sacramento, CA. Need help? Tweet us at @CareerArc!",
"friends_count": 326,
"profile_link_color": "4A913C",
"profile_image_url": "http://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"notifications": null,
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"profile_background_color": "253956",
"profile_banner_url": "https://pbs.twimg.com/profile_banners/22634351/1448587317",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"name": "TMJ-SAC IT Jobs",
"is_translation_enabled": false,
"profile_background_tile": false,
"favourites_count": 0,
"screen_name": "tmj_sac_it",
"url": "https://t.co/DByWt45HZj",
"created_at": "Tue Mar 03 15:28:22 +0000 2009",
"contributors_enabled": false,
"time_zone": "Eastern Time (US & Canada)",
"profile_sidebar_border_color": "000000",
"default_profile": false,
"following": null,
"listed_count": 36
},
"geo": {
"type": "Point",
"coordinates": [
38.5963157,
-121.4399041
]
},
"in_reply_to_user_id_str": null,
"possibly_sensitive": false,
"lang": "en",
"created_at": "Thu Dec 10 17:06:38 +0000 2015",
"in_reply_to_status_id_str": null,
"place": {
"country_code": "US",
"url": "https://api.twitter.com/1.1/geo/id/b71fac2ee9792cbe.json",
"country": "United States",
"place_type": "city",
"bounding_box": {
"type": "Polygon",
"coordinates": [
[
[
-121.576613,
38.43792
],
[
-121.362715,
38.43792
],
[
-121.362715,
38.685512
],
[
-121.576613,
38.685512
]
]
]
},
"contained_within": [],
"full_name": "Sacramento, CA",
"attributes": {},
"id": "b71fac2ee9792cbe",
"name": "Sacramento"
},
"metadata": {
"iso_language_code": "en",
"result_type": "recent"
}
}
"""
basename, ext = os.path.splitext(filename)
json_file = basename + '.json'
json_path = os.path.join(DATA_PATH, json_file)
zip_path = os.path.join(DATA_PATH, basename + '.zip')
if not os.path.isfile(json_path):
zf = ZipFile(zip_path, 'r')
zf.extract(json_file, DATA_PATH)
with open(json_path, 'rUb') as f:
return json.load(f)
|
[
"def",
"load_tweets",
"(",
"filename",
"=",
"'tweets.zip'",
")",
":",
"basename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"json_file",
"=",
"basename",
"+",
"'.json'",
"json_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_PATH",
",",
"json_file",
")",
"zip_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_PATH",
",",
"basename",
"+",
"'.zip'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"json_path",
")",
":",
"zf",
"=",
"ZipFile",
"(",
"zip_path",
",",
"'r'",
")",
"zf",
".",
"extract",
"(",
"json_file",
",",
"DATA_PATH",
")",
"with",
"open",
"(",
"json_path",
",",
"'rUb'",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
")"
] |
r"""Extract the cached tweets "database" if necessary and load + parse the json.
>>> js = load_tweets()
>>> len(js)
8000
>>> js[0].keys()
[u'contributors',
u'truncated',
u'text',
u'is_quote_status',
u'in_reply_to_status_id',
u'id',
u'favorite_count',
u'source',
u'retweeted',
u'coordinates',
u'entities',
u'in_reply_to_screen_name',
u'id_str',
u'retweet_count',
u'in_reply_to_user_id',
u'favorited',
u'user',
u'geo',
u'in_reply_to_user_id_str',
u'possibly_sensitive',
u'lang',
u'created_at',
u'in_reply_to_status_id_str',
u'place',
u'metadata']
>>> print(json.dumps((obj for obj in js if obj['geo'] is not None).next(), indent=4))
{
"contributors": null,
"truncated": false,
"text": "See our latest #Sacramento, CA #job and click to apply: Python Software Engineer - https://t.co/yimTIlISE0 #IT #Hiring #CareerArc",
"is_quote_status": false,
"in_reply_to_status_id": null,
"id": 674998672136929280,
"favorite_count": 0,
"source": "<a href=\"http://www.tweetmyjobs.com\" rel=\"nofollow\">TweetMyJOBS</a>",
"retweeted": false,
"coordinates": {
"type": "Point",
"coordinates": [
-121.4399041,
38.5963157
]
},
"entities": {
"symbols": [],
"user_mentions": [],
"hashtags": [
{
"indices": [
15,
26
],
"text": "Sacramento"
},
{
"indices": [
31,
35
],
"text": "job"
},
{
"indices": [
107,
110
],
"text": "IT"
},
{
"indices": [
111,
118
],
"text": "Hiring"
},
{
"indices": [
119,
129
],
"text": "CareerArc"
}
],
"urls": [
{
"url": "https://t.co/yimTIlISE0",
"indices": [
83,
106
],
"expanded_url": "http://bit.ly/1OTNflo",
"display_url": "bit.ly/1OTNflo"
}
]
},
"in_reply_to_screen_name": null,
"id_str": "674998672136929280",
"retweet_count": 0,
"in_reply_to_user_id": null,
"favorited": false,
"user": {
"follow_request_sent": null,
"has_extended_profile": false,
"profile_use_background_image": true,
"id": 22634351,
"verified": false,
"profile_text_color": "000000",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"profile_sidebar_fill_color": "407DB0",
"is_translator": false,
"geo_enabled": true,
"entities": {
"url": {
"urls": [
{
"url": "https://t.co/DByWt45HZj",
"indices": [
0,
23
],
"expanded_url": "http://www.careerarc.com/job-seeker",
"display_url": "careerarc.com/job-seeker"
}
]
},
"description": {
"urls": []
}
},
"followers_count": 452,
"protected": false,
"location": "Sacramento, CA",
"default_profile_image": false,
"id_str": "22634351",
"lang": "en",
"utc_offset": -18000,
"statuses_count": 157,
"description": "Follow this account for geo-targeted Software Dev.
- General/IT job tweets in Sacramento, CA. Need help? Tweet us at @CareerArc!",
"friends_count": 326,
"profile_link_color": "4A913C",
"profile_image_url": "http://pbs.twimg.com/profile_images/670049883869458435/J_Klv-BV_normal.jpg",
"notifications": null,
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"profile_background_color": "253956",
"profile_banner_url": "https://pbs.twimg.com/profile_banners/22634351/1448587317",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/315958568/Twitter-BG_2_bg-image.jpg",
"name": "TMJ-SAC IT Jobs",
"is_translation_enabled": false,
"profile_background_tile": false,
"favourites_count": 0,
"screen_name": "tmj_sac_it",
"url": "https://t.co/DByWt45HZj",
"created_at": "Tue Mar 03 15:28:22 +0000 2009",
"contributors_enabled": false,
"time_zone": "Eastern Time (US & Canada)",
"profile_sidebar_border_color": "000000",
"default_profile": false,
"following": null,
"listed_count": 36
},
"geo": {
"type": "Point",
"coordinates": [
38.5963157,
-121.4399041
]
},
"in_reply_to_user_id_str": null,
"possibly_sensitive": false,
"lang": "en",
"created_at": "Thu Dec 10 17:06:38 +0000 2015",
"in_reply_to_status_id_str": null,
"place": {
"country_code": "US",
"url": "https://api.twitter.com/1.1/geo/id/b71fac2ee9792cbe.json",
"country": "United States",
"place_type": "city",
"bounding_box": {
"type": "Polygon",
"coordinates": [
[
[
-121.576613,
38.43792
],
[
-121.362715,
38.43792
],
[
-121.362715,
38.685512
],
[
-121.576613,
38.685512
]
]
]
},
"contained_within": [],
"full_name": "Sacramento, CA",
"attributes": {},
"id": "b71fac2ee9792cbe",
"name": "Sacramento"
},
"metadata": {
"iso_language_code": "en",
"result_type": "recent"
}
}
|
[
"r",
"Extract",
"the",
"cached",
"tweets",
"database",
"if",
"necessary",
"and",
"load",
"+",
"parse",
"the",
"json",
"."
] |
5c0411d2acfbe5b421841072814c9152591c03f7
|
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/features.py#L21-L249
|
train
|
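The load_tweets() row above shows a lazy extract-then-load pattern: the .json is unzipped into the cache only when it is missing, then parsed. A minimal standalone sketch of the same pattern; DATA_PATH here is a hypothetical cache directory, not the project's value:

import json
import os
from zipfile import ZipFile

DATA_PATH = '/tmp/cache'  # hypothetical cache directory for this sketch

def load_cached_json(filename='tweets.zip', data_path=DATA_PATH):
    # Extract the .json member from the .zip only if it is not cached yet,
    # then parse it: the same lazy-extraction idea as load_tweets() above.
    basename, _ = os.path.splitext(filename)
    json_path = os.path.join(data_path, basename + '.json')
    if not os.path.isfile(json_path):
        with ZipFile(os.path.join(data_path, basename + '.zip')) as zf:
            zf.extract(basename + '.json', data_path)
    with open(json_path, 'rb') as f:
        return json.load(f)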
totalgood/twip
|
twip/scripts/cat_tweets.py
|
main
|
def main(args):
"""API with args object containing configuration parameters"""
global logging, log
args = parse_args(args)
logging.basicConfig(format=LOG_FORMAT,
level=logging.DEBUG if args.verbose else logging.INFO,
stream=sys.stdout)
df = cat_tweets(path=args.path, verbosity=args.verbose + 1, numtweets=args.numtweets, ignore_suspicious=False)
log.info('Combined {} tweets'.format(len(df)))
df = drop_nan_columns(df)
save_tweets(df, path=args.path, filename=args.tweetfile)
geo = get_geo(df, path=args.path, filename=args.geofile)
log.info("Combined {} tweets into a single file {} and set asside {} geo tweets in {}".format(
len(df), args.tweetfile, len(geo), args.geofile))
return df, geo
|
python
|
def main(args):
"""API with args object containing configuration parameters"""
global logging, log
args = parse_args(args)
logging.basicConfig(format=LOG_FORMAT,
level=logging.DEBUG if args.verbose else logging.INFO,
stream=sys.stdout)
df = cat_tweets(path=args.path, verbosity=args.verbose + 1, numtweets=args.numtweets, ignore_suspicious=False)
log.info('Combined {} tweets'.format(len(df)))
df = drop_nan_columns(df)
save_tweets(df, path=args.path, filename=args.tweetfile)
geo = get_geo(df, path=args.path, filename=args.geofile)
log.info("Combined {} tweets into a single file {} and set asside {} geo tweets in {}".format(
len(df), args.tweetfile, len(geo), args.geofile))
return df, geo
|
[
"def",
"main",
"(",
"args",
")",
":",
"global",
"logging",
",",
"log",
"args",
"=",
"parse_args",
"(",
"args",
")",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"LOG_FORMAT",
",",
"level",
"=",
"logging",
".",
"DEBUG",
"if",
"args",
".",
"verbose",
"else",
"logging",
".",
"INFO",
",",
"stream",
"=",
"sys",
".",
"stdout",
")",
"df",
"=",
"cat_tweets",
"(",
"path",
"=",
"args",
".",
"path",
",",
"verbosity",
"=",
"args",
".",
"verbose",
"+",
"1",
",",
"numtweets",
"=",
"args",
".",
"numtweets",
",",
"ignore_suspicious",
"=",
"False",
")",
"log",
".",
"info",
"(",
"'Combined {} tweets'",
".",
"format",
"(",
"len",
"(",
"df",
")",
")",
")",
"df",
"=",
"drop_nan_columns",
"(",
"df",
")",
"save_tweets",
"(",
"df",
",",
"path",
"=",
"args",
".",
"path",
",",
"filename",
"=",
"args",
".",
"tweetfile",
")",
"geo",
"=",
"get_geo",
"(",
"df",
",",
"path",
"=",
"args",
".",
"path",
",",
"filename",
"=",
"args",
".",
"geofile",
")",
"log",
".",
"info",
"(",
"\"Combined {} tweets into a single file {} and set asside {} geo tweets in {}\"",
".",
"format",
"(",
"len",
"(",
"df",
")",
",",
"args",
".",
"tweetfile",
",",
"len",
"(",
"geo",
")",
",",
"args",
".",
"geofile",
")",
")",
"return",
"df",
",",
"geo"
] |
API with args object containing configuration parameters
|
[
"API",
"with",
"args",
"object",
"containing",
"configuration",
"parameters"
] |
5c0411d2acfbe5b421841072814c9152591c03f7
|
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/scripts/cat_tweets.py#L90-L105
|
train
|
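The logging setup in main() above is a small, reusable idiom: one verbosity flag picks the level, and the stream goes to stdout so output can be piped. A minimal sketch of the same idiom:

import logging
import sys

def configure_logging(verbose=False, fmt='%(levelname)s %(message)s'):
    # Verbosity flag selects DEBUG vs INFO, everything goes to stdout,
    # mirroring the basicConfig() call in main() above.
    logging.basicConfig(format=fmt,
                        level=logging.DEBUG if verbose else logging.INFO,
                        stream=sys.stdout)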
totalgood/twip
|
twip/scripts/cat_tweets.py
|
drop_nan_columns
|
def drop_nan_columns(df, thresh=325):
"""Drop columns that are mostly NaNs
Excel files can only have 256 columns, so you may have to drop a lot in order to get down to this
"""
if thresh < 1:
thresh = int(thresh * len(df))
return df.dropna(axis=1, thresh=thresh, inplace=False)
|
python
|
def drop_nan_columns(df, thresh=325):
"""Drop columns that are mostly NaNs
Excel files can only have 256 columns, so you may have to drop a lot in order to get down to this
"""
if thresh < 1:
thresh = int(thresh * len(df))
return df.dropna(axis=1, thresh=thresh, inplace=False)
|
[
"def",
"drop_nan_columns",
"(",
"df",
",",
"thresh",
"=",
"325",
")",
":",
"if",
"thresh",
"<",
"1",
":",
"thresh",
"=",
"int",
"(",
"thresh",
"*",
"df",
")",
"return",
"df",
".",
"dropna",
"(",
"axis",
"=",
"1",
",",
"thresh",
"=",
"thresh",
",",
"inplace",
"=",
"False",
")"
] |
Drop columns that are mostly NaNs
Excel files can only have 256 columns, so you may have to drop a lot in order to get down to this
|
[
"Drop",
"columns",
"that",
"are",
"mostly",
"NaNs"
] |
5c0411d2acfbe5b421841072814c9152591c03f7
|
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/scripts/cat_tweets.py#L184-L191
|
train
|
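In drop_nan_columns() above, a thresh below 1 is meant as a fraction of the rows; otherwise it is the minimum number of non-NaN values a column needs to survive. A worked pandas example of the count form:

import numpy as np
import pandas as pd

df = pd.DataFrame({'mostly_nan': [np.nan, np.nan, np.nan, 1.0],
                   'dense': [1, 2, 3, 4]})
# dropna(axis=1, thresh=3) keeps columns with at least 3 non-NaN values:
# 'dense' has 4 and survives, 'mostly_nan' has only 1 and is dropped.
kept = df.dropna(axis=1, thresh=3)
print(list(kept.columns))  # ['dense']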
jedie/PyHardLinkBackup
|
PyHardLinkBackup/phlb/phlb_main.py
|
FileBackup.fast_deduplication_backup
|
def fast_deduplication_backup(self, old_backup_entry, process_bar):
"""
We can just link an old backup entry
:param old_backup_entry: old BackupEntry model instance
:param process_bar: tqdm process bar
"""
# TODO: merge code with parts from deduplication_backup()
src_path = self.dir_path.resolved_path
log.debug("*** fast deduplication backup: '%s'", src_path)
old_file_path = old_backup_entry.get_backup_path()
if not self.path_helper.abs_dst_path.is_dir():
try:
self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
except OSError as err:
raise BackupFileError("Error creating out path: %s" % err)
else:
assert not self.path_helper.abs_dst_filepath.is_file(), (
"Out file already exists: %r" % self.path_helper.abs_src_filepath
)
with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
try:
old_file_path.link(self.path_helper.abs_dst_filepath) # call os.link()
except OSError as err:
log.error("Can't link '%s' to '%s': %s" % (old_file_path, self.path_helper.abs_dst_filepath, err))
log.info("Mark %r with 'no link source'.", old_backup_entry)
old_backup_entry.no_link_source = True
old_backup_entry.save()
# do a normal copy backup
self.deduplication_backup(process_bar)
return
hash_hexdigest = old_backup_entry.content_info.hash_hexdigest
hash_file.write(hash_hexdigest)
file_size = self.dir_path.stat.st_size
if file_size > 0:
# tqdm will not accept 0 bytes files ;)
process_bar.update(file_size)
BackupEntry.objects.create(
backup_run=self.backup_run,
backup_entry_path=self.path_helper.abs_dst_filepath,
hash_hexdigest=hash_hexdigest,
)
if self._SIMULATE_SLOW_SPEED:
log.error("Slow down speed for tests!")
time.sleep(self._SIMULATE_SLOW_SPEED)
self.fast_backup = True # Was a fast backup used?
self.file_linked = True
|
python
|
def fast_deduplication_backup(self, old_backup_entry, process_bar):
"""
We can just link an old backup entry
:param old_backup_entry: old BackupEntry model instance
:param process_bar: tqdm process bar
"""
# TODO: merge code with parts from deduplication_backup()
src_path = self.dir_path.resolved_path
log.debug("*** fast deduplication backup: '%s'", src_path)
old_file_path = old_backup_entry.get_backup_path()
if not self.path_helper.abs_dst_path.is_dir():
try:
self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
except OSError as err:
raise BackupFileError("Error creating out path: %s" % err)
else:
assert not self.path_helper.abs_dst_filepath.is_file(), (
"Out file already exists: %r" % self.path_helper.abs_src_filepath
)
with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
try:
old_file_path.link(self.path_helper.abs_dst_filepath) # call os.link()
except OSError as err:
log.error("Can't link '%s' to '%s': %s" % (old_file_path, self.path_helper.abs_dst_filepath, err))
log.info("Mark %r with 'no link source'.", old_backup_entry)
old_backup_entry.no_link_source = True
old_backup_entry.save()
# do a normal copy backup
self.deduplication_backup(process_bar)
return
hash_hexdigest = old_backup_entry.content_info.hash_hexdigest
hash_file.write(hash_hexdigest)
file_size = self.dir_path.stat.st_size
if file_size > 0:
# tqdm will not accept 0 bytes files ;)
process_bar.update(file_size)
BackupEntry.objects.create(
backup_run=self.backup_run,
backup_entry_path=self.path_helper.abs_dst_filepath,
hash_hexdigest=hash_hexdigest,
)
if self._SIMULATE_SLOW_SPEED:
log.error("Slow down speed for tests!")
time.sleep(self._SIMULATE_SLOW_SPEED)
self.fast_backup = True # Was a fast backup used?
self.file_linked = True
|
[
"def",
"fast_deduplication_backup",
"(",
"self",
",",
"old_backup_entry",
",",
"process_bar",
")",
":",
"# TODO: merge code with parts from deduplication_backup()",
"src_path",
"=",
"self",
".",
"dir_path",
".",
"resolved_path",
"log",
".",
"debug",
"(",
"\"*** fast deduplication backup: '%s'\"",
",",
"src_path",
")",
"old_file_path",
"=",
"old_backup_entry",
".",
"get_backup_path",
"(",
")",
"if",
"not",
"self",
".",
"path_helper",
".",
"abs_dst_path",
".",
"is_dir",
"(",
")",
":",
"try",
":",
"self",
".",
"path_helper",
".",
"abs_dst_path",
".",
"makedirs",
"(",
"mode",
"=",
"phlb_config",
".",
"default_new_path_mode",
")",
"except",
"OSError",
"as",
"err",
":",
"raise",
"BackupFileError",
"(",
"\"Error creating out path: %s\"",
"%",
"err",
")",
"else",
":",
"assert",
"not",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
".",
"is_file",
"(",
")",
",",
"(",
"\"Out file already exists: %r\"",
"%",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
")",
"with",
"self",
".",
"path_helper",
".",
"abs_dst_hash_filepath",
".",
"open",
"(",
"\"w\"",
")",
"as",
"hash_file",
":",
"try",
":",
"old_file_path",
".",
"link",
"(",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
")",
"# call os.link()",
"except",
"OSError",
"as",
"err",
":",
"log",
".",
"error",
"(",
"\"Can't link '%s' to '%s': %s\"",
"%",
"(",
"old_file_path",
",",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
",",
"err",
")",
")",
"log",
".",
"info",
"(",
"\"Mark %r with 'no link source'.\"",
",",
"old_backup_entry",
")",
"old_backup_entry",
".",
"no_link_source",
"=",
"True",
"old_backup_entry",
".",
"save",
"(",
")",
"# do a normal copy backup",
"self",
".",
"deduplication_backup",
"(",
"process_bar",
")",
"return",
"hash_hexdigest",
"=",
"old_backup_entry",
".",
"content_info",
".",
"hash_hexdigest",
"hash_file",
".",
"write",
"(",
"hash_hexdigest",
")",
"file_size",
"=",
"self",
".",
"dir_path",
".",
"stat",
".",
"st_size",
"if",
"file_size",
">",
"0",
":",
"# tqdm will not accept 0 bytes files ;)",
"process_bar",
".",
"update",
"(",
"file_size",
")",
"BackupEntry",
".",
"objects",
".",
"create",
"(",
"backup_run",
"=",
"self",
".",
"backup_run",
",",
"backup_entry_path",
"=",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
",",
"hash_hexdigest",
"=",
"hash_hexdigest",
",",
")",
"if",
"self",
".",
"_SIMULATE_SLOW_SPEED",
":",
"log",
".",
"error",
"(",
"\"Slow down speed for tests!\"",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"_SIMULATE_SLOW_SPEED",
")",
"self",
".",
"fast_backup",
"=",
"True",
"# Was a fast backup used?",
"self",
".",
"file_linked",
"=",
"True"
] |
We can just link an old backup entry
:param old_backup_entry: old BackupEntry model instance
:param process_bar: tqdm process bar
|
[
"We",
"can",
"just",
"link",
"a",
"old",
"backup",
"entry"
] |
be28666834d2d9e3d8aac1b661cb2d5bd4056c29
|
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L100-L154
|
train
|
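The core of fast_deduplication_backup() is "hard-link if possible, otherwise fall back to a full copy". A minimal standalone sketch of that fallback, using only the standard library:

import os
import shutil

def link_or_copy(src, dst):
    # Fast path: hard-link the existing backup entry. If the filesystem
    # refuses (e.g. a cross-device link), fall back to a normal copy, the
    # way fast_deduplication_backup() falls back to deduplication_backup().
    try:
        os.link(src, dst)
        return True   # deduplicated via hardlink
    except OSError:
        shutil.copy2(src, dst)
        return False  # new content was copied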
jedie/PyHardLinkBackup
|
PyHardLinkBackup/phlb/phlb_main.py
|
FileBackup.deduplication_backup
|
def deduplication_backup(self, process_bar):
"""
Backup the current file and compare the content.
:param process_bar: tqdm process bar
"""
self.fast_backup = False # Was a fast backup used?
src_path = self.dir_path.resolved_path
log.debug("*** deduplication backup: '%s'", src_path)
log.debug("abs_src_filepath: '%s'", self.path_helper.abs_src_filepath)
log.debug("abs_dst_filepath: '%s'", self.path_helper.abs_dst_filepath)
log.debug("abs_dst_hash_filepath: '%s'", self.path_helper.abs_dst_hash_filepath)
log.debug("abs_dst_dir: '%s'", self.path_helper.abs_dst_path)
if not self.path_helper.abs_dst_path.is_dir():
try:
self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
except OSError as err:
raise BackupFileError("Error creating out path: %s" % err)
else:
assert not self.path_helper.abs_dst_filepath.is_file(), (
"Out file already exists: %r" % self.path_helper.abs_src_filepath
)
try:
try:
with self.path_helper.abs_src_filepath.open("rb") as in_file:
with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
with self.path_helper.abs_dst_filepath.open("wb") as out_file:
hash = self._deduplication_backup(self.dir_path, in_file, out_file, process_bar)
hash_hexdigest = hash.hexdigest()
hash_file.write(hash_hexdigest)
except OSError as err:
# FIXME: Better error message
raise BackupFileError("Skip file %s error: %s" % (self.path_helper.abs_src_filepath, err))
except KeyboardInterrupt:
# Try to remove created files
try:
self.path_helper.abs_dst_filepath.unlink()
except OSError:
pass
try:
self.path_helper.abs_dst_hash_filepath.unlink()
except OSError:
pass
raise KeyboardInterrupt
old_backup_entry = deduplicate(self.path_helper.abs_dst_filepath, hash_hexdigest)
if old_backup_entry is None:
log.debug("File is unique.")
self.file_linked = False # Was a hardlink used?
else:
log.debug("File was deduplicated via hardlink to: %s" % old_backup_entry)
self.file_linked = True # Was a hardlink used?
# set origin access/modified times to the new created backup file
atime_ns = self.dir_path.stat.st_atime_ns
mtime_ns = self.dir_path.stat.st_mtime_ns
self.path_helper.abs_dst_filepath.utime(ns=(atime_ns, mtime_ns)) # call os.utime()
log.debug("Set mtime to: %s" % mtime_ns)
BackupEntry.objects.create(
backup_run=self.backup_run,
backup_entry_path=self.path_helper.abs_dst_filepath,
hash_hexdigest=hash_hexdigest,
)
self.fast_backup = False
|
python
|
def deduplication_backup(self, process_bar):
"""
Backup the current file and compare the content.
:param process_bar: tqdm process bar
"""
self.fast_backup = False # Was a fast backup used?
src_path = self.dir_path.resolved_path
log.debug("*** deduplication backup: '%s'", src_path)
log.debug("abs_src_filepath: '%s'", self.path_helper.abs_src_filepath)
log.debug("abs_dst_filepath: '%s'", self.path_helper.abs_dst_filepath)
log.debug("abs_dst_hash_filepath: '%s'", self.path_helper.abs_dst_hash_filepath)
log.debug("abs_dst_dir: '%s'", self.path_helper.abs_dst_path)
if not self.path_helper.abs_dst_path.is_dir():
try:
self.path_helper.abs_dst_path.makedirs(mode=phlb_config.default_new_path_mode)
except OSError as err:
raise BackupFileError("Error creating out path: %s" % err)
else:
assert not self.path_helper.abs_dst_filepath.is_file(), (
"Out file already exists: %r" % self.path_helper.abs_src_filepath
)
try:
try:
with self.path_helper.abs_src_filepath.open("rb") as in_file:
with self.path_helper.abs_dst_hash_filepath.open("w") as hash_file:
with self.path_helper.abs_dst_filepath.open("wb") as out_file:
hash = self._deduplication_backup(self.dir_path, in_file, out_file, process_bar)
hash_hexdigest = hash.hexdigest()
hash_file.write(hash_hexdigest)
except OSError as err:
# FIXME: Better error message
raise BackupFileError("Skip file %s error: %s" % (self.path_helper.abs_src_filepath, err))
except KeyboardInterrupt:
# Try to remove created files
try:
self.path_helper.abs_dst_filepath.unlink()
except OSError:
pass
try:
self.path_helper.abs_dst_hash_filepath.unlink()
except OSError:
pass
raise KeyboardInterrupt
old_backup_entry = deduplicate(self.path_helper.abs_dst_filepath, hash_hexdigest)
if old_backup_entry is None:
log.debug("File is unique.")
self.file_linked = False # Was a hardlink used?
else:
log.debug("File was deduplicated via hardlink to: %s" % old_backup_entry)
self.file_linked = True # Was a hardlink used?
# set origin access/modified times to the new created backup file
atime_ns = self.dir_path.stat.st_atime_ns
mtime_ns = self.dir_path.stat.st_mtime_ns
self.path_helper.abs_dst_filepath.utime(ns=(atime_ns, mtime_ns)) # call os.utime()
log.debug("Set mtime to: %s" % mtime_ns)
BackupEntry.objects.create(
backup_run=self.backup_run,
backup_entry_path=self.path_helper.abs_dst_filepath,
hash_hexdigest=hash_hexdigest,
)
self.fast_backup = False
|
[
"def",
"deduplication_backup",
"(",
"self",
",",
"process_bar",
")",
":",
"self",
".",
"fast_backup",
"=",
"False",
"# Was a fast backup used?",
"src_path",
"=",
"self",
".",
"dir_path",
".",
"resolved_path",
"log",
".",
"debug",
"(",
"\"*** deduplication backup: '%s'\"",
",",
"src_path",
")",
"log",
".",
"debug",
"(",
"\"abs_src_filepath: '%s'\"",
",",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
")",
"log",
".",
"debug",
"(",
"\"abs_dst_filepath: '%s'\"",
",",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
")",
"log",
".",
"debug",
"(",
"\"abs_dst_hash_filepath: '%s'\"",
",",
"self",
".",
"path_helper",
".",
"abs_dst_hash_filepath",
")",
"log",
".",
"debug",
"(",
"\"abs_dst_dir: '%s'\"",
",",
"self",
".",
"path_helper",
".",
"abs_dst_path",
")",
"if",
"not",
"self",
".",
"path_helper",
".",
"abs_dst_path",
".",
"is_dir",
"(",
")",
":",
"try",
":",
"self",
".",
"path_helper",
".",
"abs_dst_path",
".",
"makedirs",
"(",
"mode",
"=",
"phlb_config",
".",
"default_new_path_mode",
")",
"except",
"OSError",
"as",
"err",
":",
"raise",
"BackupFileError",
"(",
"\"Error creating out path: %s\"",
"%",
"err",
")",
"else",
":",
"assert",
"not",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
".",
"is_file",
"(",
")",
",",
"(",
"\"Out file already exists: %r\"",
"%",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
")",
"try",
":",
"try",
":",
"with",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
".",
"open",
"(",
"\"rb\"",
")",
"as",
"in_file",
":",
"with",
"self",
".",
"path_helper",
".",
"abs_dst_hash_filepath",
".",
"open",
"(",
"\"w\"",
")",
"as",
"hash_file",
":",
"with",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
".",
"open",
"(",
"\"wb\"",
")",
"as",
"out_file",
":",
"hash",
"=",
"self",
".",
"_deduplication_backup",
"(",
"self",
".",
"dir_path",
",",
"in_file",
",",
"out_file",
",",
"process_bar",
")",
"hash_hexdigest",
"=",
"hash",
".",
"hexdigest",
"(",
")",
"hash_file",
".",
"write",
"(",
"hash_hexdigest",
")",
"except",
"OSError",
"as",
"err",
":",
"# FIXME: Better error message",
"raise",
"BackupFileError",
"(",
"\"Skip file %s error: %s\"",
"%",
"(",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
",",
"err",
")",
")",
"except",
"KeyboardInterrupt",
":",
"# Try to remove created files",
"try",
":",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
".",
"unlink",
"(",
")",
"except",
"OSError",
":",
"pass",
"try",
":",
"self",
".",
"path_helper",
".",
"abs_dst_hash_filepath",
".",
"unlink",
"(",
")",
"except",
"OSError",
":",
"pass",
"raise",
"KeyboardInterrupt",
"old_backup_entry",
"=",
"deduplicate",
"(",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
",",
"hash_hexdigest",
")",
"if",
"old_backup_entry",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"\"File is unique.\"",
")",
"self",
".",
"file_linked",
"=",
"False",
"# Was a hardlink used?",
"else",
":",
"log",
".",
"debug",
"(",
"\"File was deduplicated via hardlink to: %s\"",
"%",
"old_backup_entry",
")",
"self",
".",
"file_linked",
"=",
"True",
"# Was a hardlink used?",
"# set origin access/modified times to the new created backup file",
"atime_ns",
"=",
"self",
".",
"dir_path",
".",
"stat",
".",
"st_atime_ns",
"mtime_ns",
"=",
"self",
".",
"dir_path",
".",
"stat",
".",
"st_mtime_ns",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
".",
"utime",
"(",
"ns",
"=",
"(",
"atime_ns",
",",
"mtime_ns",
")",
")",
"# call os.utime()",
"log",
".",
"debug",
"(",
"\"Set mtime to: %s\"",
"%",
"mtime_ns",
")",
"BackupEntry",
".",
"objects",
".",
"create",
"(",
"backup_run",
"=",
"self",
".",
"backup_run",
",",
"backup_entry_path",
"=",
"self",
".",
"path_helper",
".",
"abs_dst_filepath",
",",
"hash_hexdigest",
"=",
"hash_hexdigest",
",",
")",
"self",
".",
"fast_backup",
"=",
"False"
] |
Backup the current file and compare the content.
:param process_bar: tqdm process bar
|
[
"Backup",
"the",
"current",
"file",
"and",
"compare",
"the",
"content",
"."
] |
be28666834d2d9e3d8aac1b661cb2d5bd4056c29
|
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L156-L225
|
train
|
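deduplication_backup() hashes the file content in the same pass that writes the backup copy, so the source is read exactly once. A minimal sketch of that single-pass copy-and-hash loop (chunk size and SHA-512 are assumptions here; the project's hash algorithm is configurable):

import hashlib

def copy_and_hash(in_file, out_file, chunk_size=64 * 1024):
    # Every chunk is both written to the backup and fed to the digest,
    # which is how the content hash is obtained without a second read.
    digest = hashlib.sha512()
    while True:
        chunk = in_file.read(chunk_size)
        if not chunk:
            break
        out_file.write(chunk)
        digest.update(chunk)
    return digest.hexdigest()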
jedie/PyHardLinkBackup
|
PyHardLinkBackup/phlb/phlb_main.py
|
HardLinkBackup._backup_dir_item
|
def _backup_dir_item(self, dir_path, process_bar):
"""
Backup one dir item
:param dir_path: filesystem_walk.DirEntryPath() instance
"""
self.path_helper.set_src_filepath(dir_path)
if self.path_helper.abs_src_filepath is None:
self.total_errored_items += 1
log.info("Can't backup %r", dir_path)
# self.summary(no, dir_path.stat.st_mtime, end=" ")
if dir_path.is_symlink:
self.summary("TODO Symlink: %s" % dir_path)
return
if dir_path.resolve_error is not None:
self.summary("TODO resolve error: %s" % dir_path.resolve_error)
pprint_path(dir_path)
return
if dir_path.different_path:
self.summary("TODO different path:")
pprint_path(dir_path)
return
if dir_path.is_dir:
self.summary("TODO dir: %s" % dir_path)
elif dir_path.is_file:
# self.summary("Normal file: %s", dir_path)
file_backup = FileBackup(dir_path, self.path_helper, self.backup_run)
old_backup_entry = self.fast_compare(dir_path)
if old_backup_entry is not None:
# We can just link the file from a old backup
file_backup.fast_deduplication_backup(old_backup_entry, process_bar)
else:
file_backup.deduplication_backup(process_bar)
assert file_backup.fast_backup is not None, dir_path.path
assert file_backup.file_linked is not None, dir_path.path
file_size = dir_path.stat.st_size
if file_backup.file_linked:
# os.link() was used
self.total_file_link_count += 1
self.total_stined_bytes += file_size
else:
self.total_new_file_count += 1
self.total_new_bytes += file_size
if file_backup.fast_backup:
self.total_fast_backup += 1
else:
self.summary("TODO:" % dir_path)
pprint_path(dir_path)
|
python
|
def _backup_dir_item(self, dir_path, process_bar):
"""
Backup one dir item
:param dir_path: filesystem_walk.DirEntryPath() instance
"""
self.path_helper.set_src_filepath(dir_path)
if self.path_helper.abs_src_filepath is None:
self.total_errored_items += 1
log.info("Can't backup %r", dir_path)
# self.summary(no, dir_path.stat.st_mtime, end=" ")
if dir_path.is_symlink:
self.summary("TODO Symlink: %s" % dir_path)
return
if dir_path.resolve_error is not None:
self.summary("TODO resolve error: %s" % dir_path.resolve_error)
pprint_path(dir_path)
return
if dir_path.different_path:
self.summary("TODO different path:")
pprint_path(dir_path)
return
if dir_path.is_dir:
self.summary("TODO dir: %s" % dir_path)
elif dir_path.is_file:
# self.summary("Normal file: %s", dir_path)
file_backup = FileBackup(dir_path, self.path_helper, self.backup_run)
old_backup_entry = self.fast_compare(dir_path)
if old_backup_entry is not None:
# We can just link the file from a old backup
file_backup.fast_deduplication_backup(old_backup_entry, process_bar)
else:
file_backup.deduplication_backup(process_bar)
assert file_backup.fast_backup is not None, dir_path.path
assert file_backup.file_linked is not None, dir_path.path
file_size = dir_path.stat.st_size
if file_backup.file_linked:
# os.link() was used
self.total_file_link_count += 1
self.total_stined_bytes += file_size
else:
self.total_new_file_count += 1
self.total_new_bytes += file_size
if file_backup.fast_backup:
self.total_fast_backup += 1
else:
self.summary("TODO:" % dir_path)
pprint_path(dir_path)
|
[
"def",
"_backup_dir_item",
"(",
"self",
",",
"dir_path",
",",
"process_bar",
")",
":",
"self",
".",
"path_helper",
".",
"set_src_filepath",
"(",
"dir_path",
")",
"if",
"self",
".",
"path_helper",
".",
"abs_src_filepath",
"is",
"None",
":",
"self",
".",
"total_errored_items",
"+=",
"1",
"log",
".",
"info",
"(",
"\"Can't backup %r\"",
",",
"dir_path",
")",
"# self.summary(no, dir_path.stat.st_mtime, end=\" \")",
"if",
"dir_path",
".",
"is_symlink",
":",
"self",
".",
"summary",
"(",
"\"TODO Symlink: %s\"",
"%",
"dir_path",
")",
"return",
"if",
"dir_path",
".",
"resolve_error",
"is",
"not",
"None",
":",
"self",
".",
"summary",
"(",
"\"TODO resolve error: %s\"",
"%",
"dir_path",
".",
"resolve_error",
")",
"pprint_path",
"(",
"dir_path",
")",
"return",
"if",
"dir_path",
".",
"different_path",
":",
"self",
".",
"summary",
"(",
"\"TODO different path:\"",
")",
"pprint_path",
"(",
"dir_path",
")",
"return",
"if",
"dir_path",
".",
"is_dir",
":",
"self",
".",
"summary",
"(",
"\"TODO dir: %s\"",
"%",
"dir_path",
")",
"elif",
"dir_path",
".",
"is_file",
":",
"# self.summary(\"Normal file: %s\", dir_path)",
"file_backup",
"=",
"FileBackup",
"(",
"dir_path",
",",
"self",
".",
"path_helper",
",",
"self",
".",
"backup_run",
")",
"old_backup_entry",
"=",
"self",
".",
"fast_compare",
"(",
"dir_path",
")",
"if",
"old_backup_entry",
"is",
"not",
"None",
":",
"# We can just link the file from a old backup",
"file_backup",
".",
"fast_deduplication_backup",
"(",
"old_backup_entry",
",",
"process_bar",
")",
"else",
":",
"file_backup",
".",
"deduplication_backup",
"(",
"process_bar",
")",
"assert",
"file_backup",
".",
"fast_backup",
"is",
"not",
"None",
",",
"dir_path",
".",
"path",
"assert",
"file_backup",
".",
"file_linked",
"is",
"not",
"None",
",",
"dir_path",
".",
"path",
"file_size",
"=",
"dir_path",
".",
"stat",
".",
"st_size",
"if",
"file_backup",
".",
"file_linked",
":",
"# os.link() was used",
"self",
".",
"total_file_link_count",
"+=",
"1",
"self",
".",
"total_stined_bytes",
"+=",
"file_size",
"else",
":",
"self",
".",
"total_new_file_count",
"+=",
"1",
"self",
".",
"total_new_bytes",
"+=",
"file_size",
"if",
"file_backup",
".",
"fast_backup",
":",
"self",
".",
"total_fast_backup",
"+=",
"1",
"else",
":",
"self",
".",
"summary",
"(",
"\"TODO:\"",
"%",
"dir_path",
")",
"pprint_path",
"(",
"dir_path",
")"
] |
Backup one dir item
:param dir_path: filesystem_walk.DirEntryPath() instance
|
[
"Backup",
"one",
"dir",
"item"
] |
be28666834d2d9e3d8aac1b661cb2d5bd4056c29
|
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L456-L511
|
train
|
jedie/PyHardLinkBackup
|
PyHardLinkBackup/phlb/phlb_main.py
|
HardLinkBackup.print_update
|
def print_update(self):
"""
print some status information in between.
"""
print("\r\n")
now = datetime.datetime.now()
print("Update info: (from: %s)" % now.strftime("%c"))
current_total_size = self.total_stined_bytes + self.total_new_bytes
if self.total_errored_items:
print(" * WARNING: %i omitted files!" % self.total_errored_items)
print(" * fast backup: %i files" % self.total_fast_backup)
print(
" * new content saved: %i files (%s %.1f%%)"
% (
self.total_new_file_count,
human_filesize(self.total_new_bytes),
to_percent(self.total_new_bytes, current_total_size),
)
)
print(
" * stint space via hardlinks: %i files (%s %.1f%%)"
% (
self.total_file_link_count,
human_filesize(self.total_stined_bytes),
to_percent(self.total_stined_bytes, current_total_size),
)
)
duration = default_timer() - self.start_time
performance = current_total_size / duration / 1024.0 / 1024.0
print(" * present performance: %.1fMB/s\n" % performance)
|
python
|
def print_update(self):
"""
print some status information in between.
"""
print("\r\n")
now = datetime.datetime.now()
print("Update info: (from: %s)" % now.strftime("%c"))
current_total_size = self.total_stined_bytes + self.total_new_bytes
if self.total_errored_items:
print(" * WARNING: %i omitted files!" % self.total_errored_items)
print(" * fast backup: %i files" % self.total_fast_backup)
print(
" * new content saved: %i files (%s %.1f%%)"
% (
self.total_new_file_count,
human_filesize(self.total_new_bytes),
to_percent(self.total_new_bytes, current_total_size),
)
)
print(
" * stint space via hardlinks: %i files (%s %.1f%%)"
% (
self.total_file_link_count,
human_filesize(self.total_stined_bytes),
to_percent(self.total_stined_bytes, current_total_size),
)
)
duration = default_timer() - self.start_time
performance = current_total_size / duration / 1024.0 / 1024.0
print(" * present performance: %.1fMB/s\n" % performance)
|
[
"def",
"print_update",
"(",
"self",
")",
":",
"print",
"(",
"\"\\r\\n\"",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"print",
"(",
"\"Update info: (from: %s)\"",
"%",
"now",
".",
"strftime",
"(",
"\"%c\"",
")",
")",
"current_total_size",
"=",
"self",
".",
"total_stined_bytes",
"+",
"self",
".",
"total_new_bytes",
"if",
"self",
".",
"total_errored_items",
":",
"print",
"(",
"\" * WARNING: %i omitted files!\"",
"%",
"self",
".",
"total_errored_items",
")",
"print",
"(",
"\" * fast backup: %i files\"",
"%",
"self",
".",
"total_fast_backup",
")",
"print",
"(",
"\" * new content saved: %i files (%s %.1f%%)\"",
"%",
"(",
"self",
".",
"total_new_file_count",
",",
"human_filesize",
"(",
"self",
".",
"total_new_bytes",
")",
",",
"to_percent",
"(",
"self",
".",
"total_new_bytes",
",",
"current_total_size",
")",
",",
")",
")",
"print",
"(",
"\" * stint space via hardlinks: %i files (%s %.1f%%)\"",
"%",
"(",
"self",
".",
"total_file_link_count",
",",
"human_filesize",
"(",
"self",
".",
"total_stined_bytes",
")",
",",
"to_percent",
"(",
"self",
".",
"total_stined_bytes",
",",
"current_total_size",
")",
",",
")",
")",
"duration",
"=",
"default_timer",
"(",
")",
"-",
"self",
".",
"start_time",
"performance",
"=",
"current_total_size",
"/",
"duration",
"/",
"1024.0",
"/",
"1024.0",
"print",
"(",
"\" * present performance: %.1fMB/s\\n\"",
"%",
"performance",
")"
] |
print some status information in between.
|
[
"print",
"some",
"status",
"information",
"in",
"between",
"."
] |
be28666834d2d9e3d8aac1b661cb2d5bd4056c29
|
https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/phlb_main.py#L551-L586
|
train
|
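print_update() relies on two helpers defined elsewhere in the project, to_percent() and human_filesize(). Hypothetical minimal reimplementations, only to make the arithmetic concrete (the project's own versions may differ in rounding and units):

def to_percent(part, total):
    # Guarded percentage: avoid division by zero on an empty backup run.
    return part / total * 100.0 if total else 0.0

def human_filesize(num_bytes):
    # Binary-prefix formatter, e.g. 1536 -> '1.5 KiB'.
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
        if num_bytes < 1024.0 or unit == 'TiB':
            return '%.1f %s' % (num_bytes, unit)
        num_bytes /= 1024.0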
aiidateam/aiida-codtools
|
aiida_codtools/cli/calculations/cod_tools.py
|
cli
|
def cli(code, cif, parameters, daemon):
"""Run any cod-tools calculation for the given ``CifData`` node.
The ``-p/--parameters`` option takes a single string with any command line parameters that you want to be passed
to the calculation, and by extension the cod-tools script. Example::
launch_calculation_cod_tools -X cif-filter -N 95 -p '--use-c-parser --authors "Jane Doe; John Doe"'
The parameters will be parsed into a dictionary and passed as the ``parameters`` input node to the calculation.
"""
from aiida import orm
from aiida.plugins import factories
from aiida_codtools.common.cli import CliParameters, CliRunner
from aiida_codtools.common.resources import get_default_options
process = factories.CalculationFactory(code.get_attribute('input_plugin'))
parameters = CliParameters.from_string(parameters).get_dictionary()
inputs = {
'cif': cif,
'code': code,
'metadata': {'options': get_default_options()}
}
if parameters:
inputs['parameters'] = orm.Dict(dict=parameters)
cli_runner = CliRunner(process, inputs)
cli_runner.run(daemon=daemon)
|
python
|
def cli(code, cif, parameters, daemon):
"""Run any cod-tools calculation for the given ``CifData`` node.
The ``-p/--parameters`` option takes a single string with any command line parameters that you want to be passed
to the calculation, and by extension the cod-tools script. Example::
launch_calculation_cod_tools -X cif-filter -N 95 -p '--use-c-parser --authors "Jane Doe; John Doe"'
The parameters will be parsed into a dictionary and passed as the ``parameters`` input node to the calculation.
"""
from aiida import orm
from aiida.plugins import factories
from aiida_codtools.common.cli import CliParameters, CliRunner
from aiida_codtools.common.resources import get_default_options
process = factories.CalculationFactory(code.get_attribute('input_plugin'))
parameters = CliParameters.from_string(parameters).get_dictionary()
inputs = {
'cif': cif,
'code': code,
'metadata': {'options': get_default_options()}
}
if parameters:
inputs['parameters'] = orm.Dict(dict=parameters)
cli_runner = CliRunner(process, inputs)
cli_runner.run(daemon=daemon)
|
[
"def",
"cli",
"(",
"code",
",",
"cif",
",",
"parameters",
",",
"daemon",
")",
":",
"from",
"aiida",
"import",
"orm",
"from",
"aiida",
".",
"plugins",
"import",
"factories",
"from",
"aiida_codtools",
".",
"common",
".",
"cli",
"import",
"CliParameters",
",",
"CliRunner",
"from",
"aiida_codtools",
".",
"common",
".",
"resources",
"import",
"get_default_options",
"process",
"=",
"factories",
".",
"CalculationFactory",
"(",
"code",
".",
"get_attribute",
"(",
"'input_plugin'",
")",
")",
"parameters",
"=",
"CliParameters",
".",
"from_string",
"(",
"parameters",
")",
".",
"get_dictionary",
"(",
")",
"inputs",
"=",
"{",
"'cif'",
":",
"cif",
",",
"'code'",
":",
"code",
",",
"'metadata'",
":",
"{",
"'options'",
":",
"get_default_options",
"(",
")",
"}",
"}",
"if",
"parameters",
":",
"inputs",
"[",
"'parameters'",
"]",
"=",
"orm",
".",
"Dict",
"(",
"dict",
"=",
"parameters",
")",
"cli_runner",
"=",
"CliRunner",
"(",
"process",
",",
"inputs",
")",
"cli_runner",
".",
"run",
"(",
"daemon",
"=",
"daemon",
")"
] |
Run any cod-tools calculation for the given ``CifData`` node.
The ``-p/--parameters`` option takes a single string with any command line parameters that you want to be passed
to the calculation, and by extension the cod-tools script. Example::
launch_calculation_cod_tools -X cif-filter -N 95 -p '--use-c-parser --authors "Jane Doe; John Doe"'
The parameters will be parsed into a dictionary and passed as the ``parameters`` input node to the calculation.
|
[
"Run",
"any",
"cod",
"-",
"tools",
"calculation",
"for",
"the",
"given",
"CifData",
"node",
"."
] |
da5e4259b7a2e86cf0cc3f997e11dd36d445fa94
|
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/cli/calculations/cod_tools.py#L22-L50
|
train
|
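CliParameters.from_string() in the row above turns one shell-style option string into a parameters dictionary. A naive stand-in built on shlex illustrates the idea; this is a sketch, not the project's actual parser, and valueless options simply become True:

import shlex

def parse_cli_parameters(raw):
    # '--use-c-parser --authors "Jane Doe; John Doe"'
    #   -> {'use-c-parser': True, 'authors': 'Jane Doe; John Doe'}
    parameters = {}
    key = None
    for token in shlex.split(raw):
        if token.startswith('--'):
            if key is not None:
                parameters[key] = True  # previous option had no value
            key = token[2:]
        elif key is not None:
            parameters[key] = token
            key = None
    if key is not None:
        parameters[key] = True
    return parameters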
jiasir/playback
|
playback/cli/environment.py
|
make
|
def make(parser):
"""DEPRECATED
prepare OpenStack basic environment"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def gen_pass_f(args):
gen_pass()
gen_pass_parser = s.add_parser('gen-pass', help='generate the password')
gen_pass_parser.set_defaults(func=gen_pass_f)
def cmd_f(args):
cmd(args.user, args.hosts.split(','), args.key_filename, args.password, args.run)
cmd_parser = s.add_parser('cmd', help='run command line on the target host')
cmd_parser.add_argument('--run', help='the command running on the remote node', action='store', default=None, dest='run')
cmd_parser.set_defaults(func=cmd_f)
|
python
|
def make(parser):
"""DEPRECATED
prepare OpenStack basic environment"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def gen_pass_f(args):
gen_pass()
gen_pass_parser = s.add_parser('gen-pass', help='generate the password')
gen_pass_parser.set_defaults(func=gen_pass_f)
def cmd_f(args):
cmd(args.user, args.hosts.split(','), args.key_filename, args.password, args.run)
cmd_parser = s.add_parser('cmd', help='run command line on the target host')
cmd_parser.add_argument('--run', help='the command running on the remote node', action='store', default=None, dest='run')
cmd_parser.set_defaults(func=cmd_f)
|
[
"def",
"make",
"(",
"parser",
")",
":",
"s",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'commands'",
",",
"metavar",
"=",
"'COMMAND'",
",",
"help",
"=",
"'description'",
",",
")",
"def",
"gen_pass_f",
"(",
"args",
")",
":",
"gen_pass",
"(",
")",
"gen_pass_parser",
"=",
"s",
".",
"add_parser",
"(",
"'gen-pass'",
",",
"help",
"=",
"'generate the password'",
")",
"gen_pass_parser",
".",
"set_defaults",
"(",
"func",
"=",
"gen_pass_f",
")",
"def",
"cmd_f",
"(",
"args",
")",
":",
"cmd",
"(",
"args",
".",
"user",
",",
"args",
".",
"hosts",
".",
"split",
"(",
"','",
")",
",",
"args",
".",
"key_filename",
",",
"args",
".",
"password",
",",
"args",
".",
"run",
")",
"cmd_parser",
"=",
"s",
".",
"add_parser",
"(",
"'cmd'",
",",
"help",
"=",
"'run command line on the target host'",
")",
"cmd_parser",
".",
"add_argument",
"(",
"'--run'",
",",
"help",
"=",
"'the command running on the remote node'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'run'",
")",
"cmd_parser",
".",
"set_defaults",
"(",
"func",
"=",
"cmd_f",
")"
] |
DEPRECATED
prepare OpenStack basic environment
|
[
"DEPRECATED",
"prepare",
"OpenStack",
"basic",
"environment"
] |
58b2a5d669dcfaa8cad50c544a4b068dcacf9b69
|
https://github.com/jiasir/playback/blob/58b2a5d669dcfaa8cad50c544a4b068dcacf9b69/playback/cli/environment.py#L39-L57
|
train
|
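The set_defaults(func=...) calls in make() use the standard argparse dispatch pattern: each subparser stores its handler and the caller invokes args.func(args). A self-contained example of the same pattern:

import argparse

def hello(args):
    print('hello, %s' % args.name)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='commands', metavar='COMMAND')
hello_parser = subparsers.add_parser('hello', help='greet someone')
hello_parser.add_argument('--name', default='world')
hello_parser.set_defaults(func=hello)  # same dispatch trick as in make()

args = parser.parse_args(['hello', '--name', 'ops'])
args.func(args)  # prints: hello, ops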
klen/muffin-admin
|
muffin_admin/peewee.py
|
pw_converter
|
def pw_converter(handler, flt):
"""Convert column name to filter."""
import peewee as pw
if isinstance(flt, Filter):
return flt
model = handler.model
field = getattr(model, flt)
if isinstance(field, pw.BooleanField):
return PWBoolFilter(flt)
if field.choices:
choices = [(Filter.default, '---')] + list(field.choices)
return PWChoiceFilter(flt, choices=choices)
return PWFilter(flt)
|
python
|
def pw_converter(handler, flt):
"""Convert column name to filter."""
import peewee as pw
if isinstance(flt, Filter):
return flt
model = handler.model
field = getattr(model, flt)
if isinstance(field, pw.BooleanField):
return PWBoolFilter(flt)
if field.choices:
choices = [(Filter.default, '---')] + list(field.choices)
return PWChoiceFilter(flt, choices=choices)
return PWFilter(flt)
|
[
"def",
"pw_converter",
"(",
"handler",
",",
"flt",
")",
":",
"import",
"peewee",
"as",
"pw",
"if",
"isinstance",
"(",
"flt",
",",
"Filter",
")",
":",
"return",
"flt",
"model",
"=",
"handler",
".",
"model",
"field",
"=",
"getattr",
"(",
"model",
",",
"flt",
")",
"if",
"isinstance",
"(",
"field",
",",
"pw",
".",
"BooleanField",
")",
":",
"return",
"PWBoolFilter",
"(",
"flt",
")",
"if",
"field",
".",
"choices",
":",
"choices",
"=",
"[",
"(",
"Filter",
".",
"default",
",",
"'---'",
")",
"]",
"+",
"list",
"(",
"field",
".",
"choices",
")",
"return",
"PWChoiceFilter",
"(",
"flt",
",",
"choices",
"=",
"choices",
")",
"return",
"PWFilter",
"(",
"flt",
")"
] |
Convert column name to filter.
|
[
"Convert",
"column",
"name",
"to",
"filter",
"."
] |
404dc8e5107e943b7c42fa21c679c34ddb4de1d5
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L24-L41
|
train
|
klen/muffin-admin
|
muffin_admin/peewee.py
|
RawIDField.process
|
def process(self, *args, **kwargs):
"""Get a description."""
super(RawIDField, self).process(*args, **kwargs)
if self.object_data:
self.description = self.description or str(self.object_data)
|
python
|
def process(self, *args, **kwargs):
"""Get a description."""
super(RawIDField, self).process(*args, **kwargs)
if self.object_data:
self.description = self.description or str(self.object_data)
|
[
"def",
"process",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"RawIDField",
",",
"self",
")",
".",
"process",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"object_data",
":",
"self",
".",
"description",
"=",
"self",
".",
"description",
"or",
"str",
"(",
"self",
".",
"object_data",
")"
] |
Get a description.
|
[
"Get",
"a",
"description",
"."
] |
404dc8e5107e943b7c42fa21c679c34ddb4de1d5
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L53-L57
|
train
|
klen/muffin-admin
|
muffin_admin/peewee.py
|
RawIDField._value
|
def _value(self):
"""Get field value."""
if self.data is not None:
value = self.data._data.get(self.field.to_field.name)
return str(value)
return ''
|
python
|
def _value(self):
"""Get field value."""
if self.data is not None:
value = self.data._data.get(self.field.to_field.name)
return str(value)
return ''
|
[
"def",
"_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"data",
"is",
"not",
"None",
":",
"value",
"=",
"self",
".",
"data",
".",
"_data",
".",
"get",
"(",
"self",
".",
"field",
".",
"to_field",
".",
"name",
")",
"return",
"str",
"(",
"value",
")",
"return",
"''"
] |
Get field value.
|
[
"Get",
"field",
"value",
"."
] |
404dc8e5107e943b7c42fa21c679c34ddb4de1d5
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L59-L64
|
train
|
klen/muffin-admin
|
muffin_admin/peewee.py
|
PWAdminHandler.sort
|
def sort(self, request, reverse=False):
"""Sort current collection."""
field = self.model._meta.fields.get(self.columns_sort)
if not field:
return self.collection
if reverse:
field = field.desc()
return self.collection.order_by(field)
|
python
|
def sort(self, request, reverse=False):
"""Sort current collection."""
field = self.model._meta.fields.get(self.columns_sort)
if not field:
return self.collection
if reverse:
field = field.desc()
return self.collection.order_by(field)
|
[
"def",
"sort",
"(",
"self",
",",
"request",
",",
"reverse",
"=",
"False",
")",
":",
"field",
"=",
"self",
".",
"model",
".",
"_meta",
".",
"fields",
".",
"get",
"(",
"self",
".",
"columns_sort",
")",
"if",
"not",
"field",
":",
"return",
"self",
".",
"collection",
"if",
"reverse",
":",
"field",
"=",
"field",
".",
"desc",
"(",
")",
"return",
"self",
".",
"collection",
".",
"order_by",
"(",
"field",
")"
] |
Sort current collection.
|
[
"Sort",
"current",
"collection",
"."
] |
404dc8e5107e943b7c42fa21c679c34ddb4de1d5
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L157-L166
|
train
|
klen/muffin-admin
|
muffin_admin/peewee.py
|
PWBoolFilter.value
|
def value(self, data):
"""Get value from data."""
value = data.get(self.name)
if value:
return int(value)
return self.default
|
python
|
def value(self, data):
"""Get value from data."""
value = data.get(self.name)
if value:
return int(value)
return self.default
|
[
"def",
"value",
"(",
"self",
",",
"data",
")",
":",
"value",
"=",
"data",
".",
"get",
"(",
"self",
".",
"name",
")",
"if",
"value",
":",
"return",
"int",
"(",
"value",
")",
"return",
"self",
".",
"default"
] |
Get value from data.
|
[
"Get",
"value",
"from",
"data",
"."
] |
404dc8e5107e943b7c42fa21c679c34ddb4de1d5
|
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L251-L256
|
train
|
uyar/pygenstub
|
pygenstub.py
|
get_fields
|
def get_fields(node, fields_tag="field_list"):
"""Get the field names and their values from a node.
:sig: (Document, str) -> Dict[str, str]
:param node: Node to get the fields from.
:param fields_tag: Tag of child node that contains the fields.
:return: Names and values of fields.
"""
fields_nodes = [c for c in node.children if c.tagname == fields_tag]
if len(fields_nodes) == 0:
return {}
assert len(fields_nodes) == 1, "multiple nodes with tag " + fields_tag
fields_node = fields_nodes[0]
fields = [
{f.tagname: f.rawsource.strip() for f in n.children}
for n in fields_node.children
if n.tagname == "field"
]
return {f["field_name"]: f["field_body"] for f in fields}
|
python
|
def get_fields(node, fields_tag="field_list"):
"""Get the field names and their values from a node.
:sig: (Document, str) -> Dict[str, str]
:param node: Node to get the fields from.
:param fields_tag: Tag of child node that contains the fields.
:return: Names and values of fields.
"""
fields_nodes = [c for c in node.children if c.tagname == fields_tag]
if len(fields_nodes) == 0:
return {}
assert len(fields_nodes) == 1, "multiple nodes with tag " + fields_tag
fields_node = fields_nodes[0]
fields = [
{f.tagname: f.rawsource.strip() for f in n.children}
for n in fields_node.children
if n.tagname == "field"
]
return {f["field_name"]: f["field_body"] for f in fields}
|
[
"def",
"get_fields",
"(",
"node",
",",
"fields_tag",
"=",
"\"field_list\"",
")",
":",
"fields_nodes",
"=",
"[",
"c",
"for",
"c",
"in",
"node",
".",
"children",
"if",
"c",
".",
"tagname",
"==",
"fields_tag",
"]",
"if",
"len",
"(",
"fields_nodes",
")",
"==",
"0",
":",
"return",
"{",
"}",
"assert",
"len",
"(",
"fields_nodes",
")",
"==",
"1",
",",
"\"multiple nodes with tag \"",
"+",
"fields_tag",
"fields_node",
"=",
"fields_nodes",
"[",
"0",
"]",
"fields",
"=",
"[",
"{",
"f",
".",
"tagname",
":",
"f",
".",
"rawsource",
".",
"strip",
"(",
")",
"for",
"f",
"in",
"n",
".",
"children",
"}",
"for",
"n",
"in",
"fields_node",
".",
"children",
"if",
"n",
".",
"tagname",
"==",
"\"field\"",
"]",
"return",
"{",
"f",
"[",
"\"field_name\"",
"]",
":",
"f",
"[",
"\"field_body\"",
"]",
"for",
"f",
"in",
"fields",
"}"
] |
Get the field names and their values from a node.
:sig: (Document, str) -> Dict[str, str]
:param node: Node to get the fields from.
:param fields_tag: Tag of child node that contains the fields.
:return: Names and values of fields.
|
[
"Get",
"the",
"field",
"names",
"and",
"their",
"values",
"from",
"a",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L81-L99
|
train
|
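In practice get_fields() pairs with docutils' publish_doctree: parse the docstring into a document tree, then read the field list off it. A small usage sketch, assuming get_fields is importable from the pygenstub module (the printed dictionary below is illustrative):

from docutils.core import publish_doctree
from pygenstub import get_fields

docstring = """Summary line.

:sig: (str) -> int
:param text: Input value.
"""
tree = publish_doctree(docstring, settings_overrides={'report_level': 5})
print(get_fields(tree))
# e.g. {'sig': '(str) -> int', 'param text': 'Input value.'}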
uyar/pygenstub
|
pygenstub.py
|
extract_signature
|
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD)
|
python
|
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD)
|
[
"def",
"extract_signature",
"(",
"docstring",
")",
":",
"root",
"=",
"publish_doctree",
"(",
"docstring",
",",
"settings_overrides",
"=",
"{",
"\"report_level\"",
":",
"5",
"}",
")",
"fields",
"=",
"get_fields",
"(",
"root",
")",
"return",
"fields",
".",
"get",
"(",
"SIG_FIELD",
")"
] |
Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
|
[
"Extract",
"the",
"signature",
"from",
"a",
"docstring",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L102-L111
|
train
|
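extract_signature() is the docstring-facing wrapper around get_fields(): it parses the raw docstring and returns only the :sig: field, or None when there is none. For example (assuming pygenstub is importable):

from pygenstub import extract_signature

print(extract_signature('Do things.\n\n:sig: (int, str) -> bool\n'))
# -> '(int, str) -> bool'
print(extract_signature('No signature here.'))
# -> None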
uyar/pygenstub
|
pygenstub.py
|
split_parameter_types
|
def split_parameter_types(parameters):
"""Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types.
"""
if parameters == "":
return []
# only consider the top level commas, ignore the ones in []
commas = []
bracket_depth = 0
for i, char in enumerate(parameters):
if (char == ",") and (bracket_depth == 0):
commas.append(i)
elif char == "[":
bracket_depth += 1
elif char == "]":
bracket_depth -= 1
types = []
last_i = 0
for i in commas:
types.append(parameters[last_i:i].strip())
last_i = i + 1
else:
types.append(parameters[last_i:].strip())
return types
|
python
|
def split_parameter_types(parameters):
"""Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types.
"""
if parameters == "":
return []
# only consider the top level commas, ignore the ones in []
commas = []
bracket_depth = 0
for i, char in enumerate(parameters):
if (char == ",") and (bracket_depth == 0):
commas.append(i)
elif char == "[":
bracket_depth += 1
elif char == "]":
bracket_depth -= 1
types = []
last_i = 0
for i in commas:
types.append(parameters[last_i:i].strip())
last_i = i + 1
else:
types.append(parameters[last_i:].strip())
return types
|
[
"def",
"split_parameter_types",
"(",
"parameters",
")",
":",
"if",
"parameters",
"==",
"\"\"",
":",
"return",
"[",
"]",
"# only consider the top level commas, ignore the ones in []",
"commas",
"=",
"[",
"]",
"bracket_depth",
"=",
"0",
"for",
"i",
",",
"char",
"in",
"enumerate",
"(",
"parameters",
")",
":",
"if",
"(",
"char",
"==",
"\",\"",
")",
"and",
"(",
"bracket_depth",
"==",
"0",
")",
":",
"commas",
".",
"append",
"(",
"i",
")",
"elif",
"char",
"==",
"\"[\"",
":",
"bracket_depth",
"+=",
"1",
"elif",
"char",
"==",
"\"]\"",
":",
"bracket_depth",
"-=",
"1",
"types",
"=",
"[",
"]",
"last_i",
"=",
"0",
"for",
"i",
"in",
"commas",
":",
"types",
".",
"append",
"(",
"parameters",
"[",
"last_i",
":",
"i",
"]",
".",
"strip",
"(",
")",
")",
"last_i",
"=",
"i",
"+",
"1",
"else",
":",
"types",
".",
"append",
"(",
"parameters",
"[",
"last_i",
":",
"]",
".",
"strip",
"(",
")",
")",
"return",
"types"
] |
Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types.
|
[
"Split",
"a",
"parameter",
"types",
"declaration",
"into",
"individual",
"types",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L127-L158
|
train
|
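The bracket-depth counter in split_parameter_types() is what makes the split safe for nested generics: only commas at depth zero separate parameters. Two quick checks:

split_parameter_types('int, Dict[str, List[int]], str')
# -> ['int', 'Dict[str, List[int]]', 'str']
split_parameter_types('')
# -> []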
uyar/pygenstub
|
pygenstub.py
|
parse_signature
|
def parse_signature(signature):
"""Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
"""
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires
|
python
|
def parse_signature(signature):
"""Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
"""
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires
|
[
"def",
"parse_signature",
"(",
"signature",
")",
":",
"if",
"\" -> \"",
"not",
"in",
"signature",
":",
"# signature comment: no parameters, treat variable type as return type",
"param_types",
",",
"return_type",
"=",
"None",
",",
"signature",
".",
"strip",
"(",
")",
"else",
":",
"lhs",
",",
"return_type",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"signature",
".",
"split",
"(",
"\" -> \"",
")",
"]",
"csv",
"=",
"lhs",
"[",
"1",
":",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"# remove the parentheses around the parameter type list",
"param_types",
"=",
"split_parameter_types",
"(",
"csv",
")",
"requires",
"=",
"set",
"(",
"_RE_QUALIFIED_TYPES",
".",
"findall",
"(",
"signature",
")",
")",
"return",
"param_types",
",",
"return_type",
",",
"requires"
] |
Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
|
[
"Parse",
"a",
"signature",
"into",
"its",
"input",
"and",
"return",
"parameter",
"types",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L161-L179
|
train
|
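parse_signature() splits a full signature around ' -> ' and hands the left-hand side to split_parameter_types(); the requires set collects the qualified type names found by the module's regex. For example (the exact contents of requires depend on _RE_QUALIFIED_TYPES):

param_types, return_type, requires = parse_signature('(str, Optional[int]) -> List[str]')
# param_types == ['str', 'Optional[int]']
# return_type == 'List[str]'
# requires includes names such as 'Optional' and 'List'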
uyar/pygenstub
|
pygenstub.py
|
get_aliases
|
def get_aliases(lines):
"""Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
    :return: Aliases and their definitions.
"""
aliases = {}
for line in lines:
line = line.strip()
if len(line) > 0 and line.startswith(SIG_ALIAS):
_, content = line.split(SIG_ALIAS)
alias, signature = [t.strip() for t in content.split("=")]
aliases[alias] = signature
return aliases
|
python
|
def get_aliases(lines):
"""Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
    :return: Aliases and their definitions.
"""
aliases = {}
for line in lines:
line = line.strip()
if len(line) > 0 and line.startswith(SIG_ALIAS):
_, content = line.split(SIG_ALIAS)
alias, signature = [t.strip() for t in content.split("=")]
aliases[alias] = signature
return aliases
|
[
"def",
"get_aliases",
"(",
"lines",
")",
":",
"aliases",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"line",
")",
">",
"0",
"and",
"line",
".",
"startswith",
"(",
"SIG_ALIAS",
")",
":",
"_",
",",
"content",
"=",
"line",
".",
"split",
"(",
"SIG_ALIAS",
")",
"alias",
",",
"signature",
"=",
"[",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"content",
".",
"split",
"(",
"\"=\"",
")",
"]",
"aliases",
"[",
"alias",
"]",
"=",
"signature",
"return",
"aliases"
] |
Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
:return: Aliases and their definitions.
|
[
"Get",
"the",
"type",
"aliases",
"in",
"the",
"source",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L372-L386
|
train
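A usage sketch for get_aliases; this assumes SIG_ALIAS is the "# sigalias:" comment marker, since the constant's value is not shown in this record:

lines = [
    "# sigalias: PersonKey = Tuple[str, str]",
    "# sigalias: People = Dict[PersonKey, Person]",
    "people = {}",
]
print(get_aliases(lines))
# Expected: {'PersonKey': 'Tuple[str, str]', 'People': 'Dict[PersonKey, Person]'}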
|
uyar/pygenstub
|
pygenstub.py
|
get_stub
|
def get_stub(source, generic=False):
"""Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
"""
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub
|
python
|
def get_stub(source, generic=False):
"""Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
"""
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub
|
[
"def",
"get_stub",
"(",
"source",
",",
"generic",
"=",
"False",
")",
":",
"generator",
"=",
"StubGenerator",
"(",
"source",
",",
"generic",
"=",
"generic",
")",
"stub",
"=",
"generator",
".",
"generate_stub",
"(",
")",
"return",
"stub"
] |
Get the stub code for source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
|
[
"Get",
"the",
"stub",
"code",
"for",
"a",
"source",
"code",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L745-L755
|
train
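A minimal usage sketch for get_stub, assuming the usual pygenstub convention of a :sig: field in the docstring:

source = '''
def add(a, b):
    """Add two numbers.

    :sig: (int, int) -> int
    """
    return a + b
'''
print(get_stub(source))
# Expected output is roughly: def add(a: int, b: int) -> int: ...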
|
uyar/pygenstub
|
pygenstub.py
|
get_mod_paths
|
def get_mod_paths(mod_name, out_dir):
"""Get source and stub paths for a module."""
paths = []
try:
mod = get_loader(mod_name)
source = Path(mod.path)
if source.name.endswith(".py"):
source_rel = Path(*mod_name.split("."))
if source.name == "__init__.py":
source_rel = source_rel.joinpath("__init__.py")
destination = Path(out_dir, source_rel.with_suffix(".pyi"))
paths.append((source, destination))
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle module, skipping: %s", mod_name)
return paths
|
python
|
def get_mod_paths(mod_name, out_dir):
"""Get source and stub paths for a module."""
paths = []
try:
mod = get_loader(mod_name)
source = Path(mod.path)
if source.name.endswith(".py"):
source_rel = Path(*mod_name.split("."))
if source.name == "__init__.py":
source_rel = source_rel.joinpath("__init__.py")
destination = Path(out_dir, source_rel.with_suffix(".pyi"))
paths.append((source, destination))
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle module, skipping: %s", mod_name)
return paths
|
[
"def",
"get_mod_paths",
"(",
"mod_name",
",",
"out_dir",
")",
":",
"paths",
"=",
"[",
"]",
"try",
":",
"mod",
"=",
"get_loader",
"(",
"mod_name",
")",
"source",
"=",
"Path",
"(",
"mod",
".",
"path",
")",
"if",
"source",
".",
"name",
".",
"endswith",
"(",
"\".py\"",
")",
":",
"source_rel",
"=",
"Path",
"(",
"*",
"mod_name",
".",
"split",
"(",
"\".\"",
")",
")",
"if",
"source",
".",
"name",
"==",
"\"__init__.py\"",
":",
"source_rel",
"=",
"source_rel",
".",
"joinpath",
"(",
"\"__init__.py\"",
")",
"destination",
"=",
"Path",
"(",
"out_dir",
",",
"source_rel",
".",
"with_suffix",
"(",
"\".pyi\"",
")",
")",
"paths",
".",
"append",
"(",
"(",
"source",
",",
"destination",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"_logger",
".",
"debug",
"(",
"e",
")",
"_logger",
".",
"warning",
"(",
"\"cannot handle module, skipping: %s\"",
",",
"mod_name",
")",
"return",
"paths"
] |
Get source and stub paths for a module.
|
[
"Get",
"source",
"and",
"stub",
"paths",
"for",
"a",
"module",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L758-L773
|
train
|
uyar/pygenstub
|
pygenstub.py
|
get_pkg_paths
|
def get_pkg_paths(pkg_name, out_dir):
"""Recursively get all source and stub paths for a package."""
paths = []
try:
pkg = import_module(pkg_name)
if not hasattr(pkg, "__path__"):
return get_mod_paths(pkg_name, out_dir)
for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
mod_paths = get_mod_paths(mod_info.name, out_dir)
paths.extend(mod_paths)
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle package, skipping: %s", pkg_name)
return paths
|
python
|
def get_pkg_paths(pkg_name, out_dir):
"""Recursively get all source and stub paths for a package."""
paths = []
try:
pkg = import_module(pkg_name)
if not hasattr(pkg, "__path__"):
return get_mod_paths(pkg_name, out_dir)
for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
mod_paths = get_mod_paths(mod_info.name, out_dir)
paths.extend(mod_paths)
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle package, skipping: %s", pkg_name)
return paths
|
[
"def",
"get_pkg_paths",
"(",
"pkg_name",
",",
"out_dir",
")",
":",
"paths",
"=",
"[",
"]",
"try",
":",
"pkg",
"=",
"import_module",
"(",
"pkg_name",
")",
"if",
"not",
"hasattr",
"(",
"pkg",
",",
"\"__path__\"",
")",
":",
"return",
"get_mod_paths",
"(",
"pkg_name",
",",
"out_dir",
")",
"for",
"mod_info",
"in",
"walk_packages",
"(",
"pkg",
".",
"__path__",
",",
"pkg",
".",
"__name__",
"+",
"\".\"",
")",
":",
"mod_paths",
"=",
"get_mod_paths",
"(",
"mod_info",
".",
"name",
",",
"out_dir",
")",
"paths",
".",
"extend",
"(",
"mod_paths",
")",
"except",
"Exception",
"as",
"e",
":",
"_logger",
".",
"debug",
"(",
"e",
")",
"_logger",
".",
"warning",
"(",
"\"cannot handle package, skipping: %s\"",
",",
"pkg_name",
")",
"return",
"paths"
] |
Recursively get all source and stub paths for a package.
|
[
"Recursively",
"get",
"all",
"source",
"and",
"stub",
"paths",
"for",
"a",
"package",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L776-L789
|
train
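A hedged usage sketch combining get_mod_paths and get_pkg_paths above; the exact source paths depend on the local installation:

for source, destination in get_pkg_paths("json", "stubs"):
    print(source, "->", destination)
# e.g. /usr/lib/python3.x/json/__init__.py -> stubs/json/__init__.pyi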
|
uyar/pygenstub
|
pygenstub.py
|
process_docstring
|
def process_docstring(app, what, name, obj, options, lines):
"""Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document.
"""
aliases = getattr(app, "_sigaliases", None)
if aliases is None:
if what == "module":
aliases = get_aliases(inspect.getsource(obj).splitlines())
app._sigaliases = aliases
sig_marker = ":" + SIG_FIELD + ":"
is_class = what in ("class", "exception")
signature = extract_signature("\n".join(lines))
if signature is None:
if not is_class:
return
init_method = getattr(obj, "__init__")
init_doc = init_method.__doc__
init_lines = init_doc.splitlines()[1:]
if len(init_lines) > 1:
init_doc = textwrap.dedent("\n".join(init_lines[1:]))
init_lines = init_doc.splitlines()
if sig_marker not in init_doc:
return
sig_started = False
for line in init_lines:
if line.lstrip().startswith(sig_marker):
sig_started = True
if sig_started:
lines.append(line)
signature = extract_signature("\n".join(lines))
if is_class:
obj = init_method
param_types, rtype, _ = parse_signature(signature)
param_names = [p for p in inspect.signature(obj).parameters]
if is_class and (param_names[0] == "self"):
del param_names[0]
# if something goes wrong, don't insert parameter types
if len(param_names) == len(param_types):
for name, type_ in zip(param_names, param_types):
find = ":param %(name)s:" % {"name": name}
alias = aliases.get(type_)
if alias is not None:
type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
for i, line in enumerate(lines):
if line.startswith(find):
lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
break
if not is_class:
for i, line in enumerate(lines):
if line.startswith((":return:", ":returns:")):
lines.insert(i, ":rtype: " + rtype)
break
# remove the signature field
sig_start = 0
while sig_start < len(lines):
if lines[sig_start].startswith(sig_marker):
break
sig_start += 1
sig_end = sig_start + 1
while sig_end < len(lines):
if (not lines[sig_end]) or (lines[sig_end][0] != " "):
break
sig_end += 1
for i in reversed(range(sig_start, sig_end)):
del lines[i]
|
python
|
def process_docstring(app, what, name, obj, options, lines):
"""Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document.
"""
aliases = getattr(app, "_sigaliases", None)
if aliases is None:
if what == "module":
aliases = get_aliases(inspect.getsource(obj).splitlines())
app._sigaliases = aliases
sig_marker = ":" + SIG_FIELD + ":"
is_class = what in ("class", "exception")
signature = extract_signature("\n".join(lines))
if signature is None:
if not is_class:
return
init_method = getattr(obj, "__init__")
init_doc = init_method.__doc__
init_lines = init_doc.splitlines()[1:]
if len(init_lines) > 1:
init_doc = textwrap.dedent("\n".join(init_lines[1:]))
init_lines = init_doc.splitlines()
if sig_marker not in init_doc:
return
sig_started = False
for line in init_lines:
if line.lstrip().startswith(sig_marker):
sig_started = True
if sig_started:
lines.append(line)
signature = extract_signature("\n".join(lines))
if is_class:
obj = init_method
param_types, rtype, _ = parse_signature(signature)
param_names = [p for p in inspect.signature(obj).parameters]
if is_class and (param_names[0] == "self"):
del param_names[0]
# if something goes wrong, don't insert parameter types
if len(param_names) == len(param_types):
for name, type_ in zip(param_names, param_types):
find = ":param %(name)s:" % {"name": name}
alias = aliases.get(type_)
if alias is not None:
type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
for i, line in enumerate(lines):
if line.startswith(find):
lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
break
if not is_class:
for i, line in enumerate(lines):
if line.startswith((":return:", ":returns:")):
lines.insert(i, ":rtype: " + rtype)
break
# remove the signature field
sig_start = 0
while sig_start < len(lines):
if lines[sig_start].startswith(sig_marker):
break
sig_start += 1
sig_end = sig_start + 1
while sig_end < len(lines):
if (not lines[sig_end]) or (lines[sig_end][0] != " "):
break
sig_end += 1
for i in reversed(range(sig_start, sig_end)):
del lines[i]
|
[
"def",
"process_docstring",
"(",
"app",
",",
"what",
",",
"name",
",",
"obj",
",",
"options",
",",
"lines",
")",
":",
"aliases",
"=",
"getattr",
"(",
"app",
",",
"\"_sigaliases\"",
",",
"None",
")",
"if",
"aliases",
"is",
"None",
":",
"if",
"what",
"==",
"\"module\"",
":",
"aliases",
"=",
"get_aliases",
"(",
"inspect",
".",
"getsource",
"(",
"obj",
")",
".",
"splitlines",
"(",
")",
")",
"app",
".",
"_sigaliases",
"=",
"aliases",
"sig_marker",
"=",
"\":\"",
"+",
"SIG_FIELD",
"+",
"\":\"",
"is_class",
"=",
"what",
"in",
"(",
"\"class\"",
",",
"\"exception\"",
")",
"signature",
"=",
"extract_signature",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
"if",
"signature",
"is",
"None",
":",
"if",
"not",
"is_class",
":",
"return",
"init_method",
"=",
"getattr",
"(",
"obj",
",",
"\"__init__\"",
")",
"init_doc",
"=",
"init_method",
".",
"__doc__",
"init_lines",
"=",
"init_doc",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"init_lines",
")",
">",
"1",
":",
"init_doc",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\\n\"",
".",
"join",
"(",
"init_lines",
"[",
"1",
":",
"]",
")",
")",
"init_lines",
"=",
"init_doc",
".",
"splitlines",
"(",
")",
"if",
"sig_marker",
"not",
"in",
"init_doc",
":",
"return",
"sig_started",
"=",
"False",
"for",
"line",
"in",
"init_lines",
":",
"if",
"line",
".",
"lstrip",
"(",
")",
".",
"startswith",
"(",
"sig_marker",
")",
":",
"sig_started",
"=",
"True",
"if",
"sig_started",
":",
"lines",
".",
"append",
"(",
"line",
")",
"signature",
"=",
"extract_signature",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
"if",
"is_class",
":",
"obj",
"=",
"init_method",
"param_types",
",",
"rtype",
",",
"_",
"=",
"parse_signature",
"(",
"signature",
")",
"param_names",
"=",
"[",
"p",
"for",
"p",
"in",
"inspect",
".",
"signature",
"(",
"obj",
")",
".",
"parameters",
"]",
"if",
"is_class",
"and",
"(",
"param_names",
"[",
"0",
"]",
"==",
"\"self\"",
")",
":",
"del",
"param_names",
"[",
"0",
"]",
"# if something goes wrong, don't insert parameter types",
"if",
"len",
"(",
"param_names",
")",
"==",
"len",
"(",
"param_types",
")",
":",
"for",
"name",
",",
"type_",
"in",
"zip",
"(",
"param_names",
",",
"param_types",
")",
":",
"find",
"=",
"\":param %(name)s:\"",
"%",
"{",
"\"name\"",
":",
"name",
"}",
"alias",
"=",
"aliases",
".",
"get",
"(",
"type_",
")",
"if",
"alias",
"is",
"not",
"None",
":",
"type_",
"=",
"\"*%(type)s* :sup:`%(alias)s`\"",
"%",
"{",
"\"type\"",
":",
"type_",
",",
"\"alias\"",
":",
"alias",
"}",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"find",
")",
":",
"lines",
".",
"insert",
"(",
"i",
",",
"\":type %(name)s: %(type)s\"",
"%",
"{",
"\"name\"",
":",
"name",
",",
"\"type\"",
":",
"type_",
"}",
")",
"break",
"if",
"not",
"is_class",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"(",
"\":return:\"",
",",
"\":returns:\"",
")",
")",
":",
"lines",
".",
"insert",
"(",
"i",
",",
"\":rtype: \"",
"+",
"rtype",
")",
"break",
"# remove the signature field",
"sig_start",
"=",
"0",
"while",
"sig_start",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"lines",
"[",
"sig_start",
"]",
".",
"startswith",
"(",
"sig_marker",
")",
":",
"break",
"sig_start",
"+=",
"1",
"sig_end",
"=",
"sig_start",
"+",
"1",
"while",
"sig_end",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"(",
"not",
"lines",
"[",
"sig_end",
"]",
")",
"or",
"(",
"lines",
"[",
"sig_end",
"]",
"[",
"0",
"]",
"!=",
"\" \"",
")",
":",
"break",
"sig_end",
"+=",
"1",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"sig_start",
",",
"sig_end",
")",
")",
":",
"del",
"lines",
"[",
"i",
"]"
] |
Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document.
|
[
"Modify",
"the",
"docstring",
"before",
"generating",
"documentation",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L797-L874
|
train
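The closing while loops of process_docstring delete the signature field in place; a self-contained sketch of the same scan, with a hypothetical marker, behaves like this:

def strip_field(lines, marker=":sig:"):
    """Remove the marker line plus its indented continuation lines, in place."""
    start = 0
    while start < len(lines) and not lines[start].startswith(marker):
        start += 1
    end = start + 1
    while end < len(lines) and lines[end] and lines[end][0] == " ":
        end += 1
    del lines[start:end]   # no-op when the marker is absent

doc = ["Frobnicate widgets.", ":sig: (int) -> int", "    (continued)", ":param x: input"]
strip_field(doc)
print(doc)  # ['Frobnicate widgets.', ':param x: input']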
|
uyar/pygenstub
|
pygenstub.py
|
main
|
def main(argv=None):
"""Start the command line interface."""
parser = ArgumentParser(prog="pygenstub")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
parser.add_argument("files", nargs="*", help="generate stubs for given files")
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stubs for given modules",
)
parser.add_argument(
"-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
)
parser.add_argument(
"--generic", action="store_true", default=False, help="generate generic stubs"
)
parser.add_argument("--debug", action="store_true", help="enable debug messages")
argv = argv if argv is not None else sys.argv
arguments = parser.parse_args(argv[1:])
# set debug mode
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
_logger.debug("running in debug mode")
out_dir = arguments.out_dir if arguments.out_dir is not None else ""
if (out_dir == "") and (len(arguments.modules) > 0):
print("Output directory must be given when generating stubs for modules.")
sys.exit(1)
modules = []
for path in arguments.files:
paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
for source in paths:
if str(source).startswith(os.path.pardir):
source = source.absolute().resolve()
if (out_dir != "") and source.is_absolute():
source = source.relative_to(source.root)
destination = Path(out_dir, source.with_suffix(".pyi"))
modules.append((source, destination))
for mod_name in arguments.modules:
modules.extend(get_pkg_paths(mod_name, out_dir))
for source, destination in modules:
_logger.info("generating stub for %s to path %s", source, destination)
with source.open() as f:
code = f.read()
try:
stub = get_stub(code, generic=arguments.generic)
except Exception as e:
print(source, "-", e, file=sys.stderr)
continue
if stub != "":
if not destination.parent.exists():
destination.parent.mkdir(parents=True)
with destination.open("w") as f:
f.write("# " + EDIT_WARNING + "\n\n" + stub)
|
python
|
def main(argv=None):
"""Start the command line interface."""
parser = ArgumentParser(prog="pygenstub")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
parser.add_argument("files", nargs="*", help="generate stubs for given files")
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stubs for given modules",
)
parser.add_argument(
"-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
)
parser.add_argument(
"--generic", action="store_true", default=False, help="generate generic stubs"
)
parser.add_argument("--debug", action="store_true", help="enable debug messages")
argv = argv if argv is not None else sys.argv
arguments = parser.parse_args(argv[1:])
# set debug mode
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
_logger.debug("running in debug mode")
out_dir = arguments.out_dir if arguments.out_dir is not None else ""
if (out_dir == "") and (len(arguments.modules) > 0):
print("Output directory must be given when generating stubs for modules.")
sys.exit(1)
modules = []
for path in arguments.files:
paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
for source in paths:
if str(source).startswith(os.path.pardir):
source = source.absolute().resolve()
if (out_dir != "") and source.is_absolute():
source = source.relative_to(source.root)
destination = Path(out_dir, source.with_suffix(".pyi"))
modules.append((source, destination))
for mod_name in arguments.modules:
modules.extend(get_pkg_paths(mod_name, out_dir))
for source, destination in modules:
_logger.info("generating stub for %s to path %s", source, destination)
with source.open() as f:
code = f.read()
try:
stub = get_stub(code, generic=arguments.generic)
except Exception as e:
print(source, "-", e, file=sys.stderr)
continue
if stub != "":
if not destination.parent.exists():
destination.parent.mkdir(parents=True)
with destination.open("w") as f:
f.write("# " + EDIT_WARNING + "\n\n" + stub)
|
[
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
"prog",
"=",
"\"pygenstub\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"%(prog)s \"",
"+",
"__version__",
")",
"parser",
".",
"add_argument",
"(",
"\"files\"",
",",
"nargs",
"=",
"\"*\"",
",",
"help",
"=",
"\"generate stubs for given files\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--module\"",
",",
"action",
"=",
"\"append\"",
",",
"metavar",
"=",
"\"MODULE\"",
",",
"dest",
"=",
"\"modules\"",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"generate stubs for given modules\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"metavar",
"=",
"\"PATH\"",
",",
"dest",
"=",
"\"out_dir\"",
",",
"help",
"=",
"\"change the output directory\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--generic\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"generate generic stubs\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--debug\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"enable debug messages\"",
")",
"argv",
"=",
"argv",
"if",
"argv",
"is",
"not",
"None",
"else",
"sys",
".",
"argv",
"arguments",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
"[",
"1",
":",
"]",
")",
"# set debug mode",
"if",
"arguments",
".",
"debug",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"_logger",
".",
"debug",
"(",
"\"running in debug mode\"",
")",
"out_dir",
"=",
"arguments",
".",
"out_dir",
"if",
"arguments",
".",
"out_dir",
"is",
"not",
"None",
"else",
"\"\"",
"if",
"(",
"out_dir",
"==",
"\"\"",
")",
"and",
"(",
"len",
"(",
"arguments",
".",
"modules",
")",
">",
"0",
")",
":",
"print",
"(",
"\"Output directory must be given when generating stubs for modules.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"modules",
"=",
"[",
"]",
"for",
"path",
"in",
"arguments",
".",
"files",
":",
"paths",
"=",
"Path",
"(",
"path",
")",
".",
"glob",
"(",
"\"**/*.py\"",
")",
"if",
"Path",
"(",
"path",
")",
".",
"is_dir",
"(",
")",
"else",
"[",
"Path",
"(",
"path",
")",
"]",
"for",
"source",
"in",
"paths",
":",
"if",
"str",
"(",
"source",
")",
".",
"startswith",
"(",
"os",
".",
"path",
".",
"pardir",
")",
":",
"source",
"=",
"source",
".",
"absolute",
"(",
")",
".",
"resolve",
"(",
")",
"if",
"(",
"out_dir",
"!=",
"\"\"",
")",
"and",
"source",
".",
"is_absolute",
"(",
")",
":",
"source",
"=",
"source",
".",
"relative_to",
"(",
"source",
".",
"root",
")",
"destination",
"=",
"Path",
"(",
"out_dir",
",",
"source",
".",
"with_suffix",
"(",
"\".pyi\"",
")",
")",
"modules",
".",
"append",
"(",
"(",
"source",
",",
"destination",
")",
")",
"for",
"mod_name",
"in",
"arguments",
".",
"modules",
":",
"modules",
".",
"extend",
"(",
"get_pkg_paths",
"(",
"mod_name",
",",
"out_dir",
")",
")",
"for",
"source",
",",
"destination",
"in",
"modules",
":",
"_logger",
".",
"info",
"(",
"\"generating stub for %s to path %s\"",
",",
"source",
",",
"destination",
")",
"with",
"source",
".",
"open",
"(",
")",
"as",
"f",
":",
"code",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"stub",
"=",
"get_stub",
"(",
"code",
",",
"generic",
"=",
"arguments",
".",
"generic",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"source",
",",
"\"-\"",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"if",
"stub",
"!=",
"\"\"",
":",
"if",
"not",
"destination",
".",
"parent",
".",
"exists",
"(",
")",
":",
"destination",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
")",
"with",
"destination",
".",
"open",
"(",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"# \"",
"+",
"EDIT_WARNING",
"+",
"\"\\n\\n\"",
"+",
"stub",
")"
] |
Start the command line interface.
|
[
"Start",
"the",
"command",
"line",
"interface",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L888-L951
|
train
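Hedged invocation sketches for main; argv[0] is the program name and is skipped by parse_args(argv[1:]):

main(["pygenstub", "module.py"])                        # writes module.pyi
main(["pygenstub", "--generic", "-o", "stubs", "pkg"])  # generic stubs under stubs/
main(["pygenstub", "-m", "json", "-o", "stubs"])        # -m requires an output directory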
|
uyar/pygenstub
|
pygenstub.py
|
StubNode.add_variable
|
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
|
python
|
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
|
[
"def",
"add_variable",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"name",
"not",
"in",
"self",
".",
"variable_names",
":",
"self",
".",
"variables",
".",
"append",
"(",
"node",
")",
"self",
".",
"variable_names",
".",
"add",
"(",
"node",
".",
"name",
")",
"node",
".",
"parent",
"=",
"self"
] |
Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
|
[
"Add",
"a",
"variable",
"node",
"to",
"this",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L195-L204
|
train
|
uyar/pygenstub
|
pygenstub.py
|
StubNode.get_code
|
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
|
python
|
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
|
[
"def",
"get_code",
"(",
"self",
")",
":",
"stub",
"=",
"[",
"]",
"for",
"child",
"in",
"self",
".",
"variables",
":",
"stub",
".",
"extend",
"(",
"child",
".",
"get_code",
"(",
")",
")",
"if",
"(",
"(",
"len",
"(",
"self",
".",
"variables",
")",
">",
"0",
")",
"and",
"(",
"len",
"(",
"self",
".",
"children",
")",
">",
"0",
")",
"and",
"(",
"not",
"isinstance",
"(",
"self",
",",
"ClassNode",
")",
")",
")",
":",
"stub",
".",
"append",
"(",
"\"\"",
")",
"for",
"child",
"in",
"self",
".",
"children",
":",
"stub",
".",
"extend",
"(",
"child",
".",
"get_code",
"(",
")",
")",
"return",
"stub"
] |
Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
|
[
"Get",
"the",
"stub",
"code",
"for",
"this",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L215-L235
|
train
|
uyar/pygenstub
|
pygenstub.py
|
FunctionNode.get_code
|
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
|
python
|
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
|
[
"def",
"get_code",
"(",
"self",
")",
":",
"stub",
"=",
"[",
"]",
"for",
"deco",
"in",
"self",
".",
"decorators",
":",
"if",
"(",
"deco",
"in",
"DECORATORS",
")",
"or",
"deco",
".",
"endswith",
"(",
"\".setter\"",
")",
":",
"stub",
".",
"append",
"(",
"\"@\"",
"+",
"deco",
")",
"parameters",
"=",
"[",
"]",
"for",
"name",
",",
"type_",
",",
"has_default",
"in",
"self",
".",
"parameters",
":",
"decl",
"=",
"\"%(n)s%(t)s%(d)s\"",
"%",
"{",
"\"n\"",
":",
"name",
",",
"\"t\"",
":",
"\": \"",
"+",
"type_",
"if",
"type_",
"else",
"\"\"",
",",
"\"d\"",
":",
"\" = ...\"",
"if",
"has_default",
"else",
"\"\"",
",",
"}",
"parameters",
".",
"append",
"(",
"decl",
")",
"slots",
"=",
"{",
"\"a\"",
":",
"\"async \"",
"if",
"self",
".",
"_async",
"else",
"\"\"",
",",
"\"n\"",
":",
"self",
".",
"name",
",",
"\"p\"",
":",
"\", \"",
".",
"join",
"(",
"parameters",
")",
",",
"\"r\"",
":",
"self",
".",
"rtype",
",",
"}",
"prototype",
"=",
"\"%(a)sdef %(n)s(%(p)s) -> %(r)s: ...\"",
"%",
"slots",
"if",
"len",
"(",
"prototype",
")",
"<=",
"LINE_LENGTH_LIMIT",
":",
"stub",
".",
"append",
"(",
"prototype",
")",
"elif",
"len",
"(",
"INDENT",
"+",
"slots",
"[",
"\"p\"",
"]",
")",
"<=",
"LINE_LENGTH_LIMIT",
":",
"stub",
".",
"append",
"(",
"\"%(a)sdef %(n)s(\"",
"%",
"slots",
")",
"stub",
".",
"append",
"(",
"INDENT",
"+",
"slots",
"[",
"\"p\"",
"]",
")",
"stub",
".",
"append",
"(",
"\") -> %(r)s: ...\"",
"%",
"slots",
")",
"else",
":",
"stub",
".",
"append",
"(",
"\"%(a)sdef %(n)s(\"",
"%",
"slots",
")",
"for",
"param",
"in",
"parameters",
":",
"stub",
".",
"append",
"(",
"INDENT",
"+",
"param",
"+",
"\",\"",
")",
"stub",
".",
"append",
"(",
"\") -> %(r)s: ...\"",
"%",
"slots",
")",
"return",
"stub"
] |
Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
|
[
"Get",
"the",
"stub",
"code",
"for",
"this",
"function",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L290-L331
|
train
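FunctionNode.get_code picks one of three layouts depending on LINE_LENGTH_LIMIT; a sketch with hypothetical field values shows the single-line form and the fallbacks:

# Hypothetical node data:
name, rtype = "load", "Config"
parameters = ["path: str", "strict: bool = ..."]
prototype = "def %(n)s(%(p)s) -> %(r)s: ..." % {
    "n": name, "p": ", ".join(parameters), "r": rtype,
}
print(prototype)  # def load(path: str, strict: bool = ...) -> Config: ...
# If that exceeds LINE_LENGTH_LIMIT, the method falls back to:
#   def load(
#       path: str, strict: bool = ...
#   ) -> Config: ...
# and, for very long parameter lists, to one parameter per line with
# trailing commas.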
|
uyar/pygenstub
|
pygenstub.py
|
ClassNode.get_code
|
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
|
python
|
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
|
[
"def",
"get_code",
"(",
"self",
")",
":",
"stub",
"=",
"[",
"]",
"bases",
"=",
"(",
"\"(\"",
"+",
"\", \"",
".",
"join",
"(",
"self",
".",
"bases",
")",
"+",
"\")\"",
")",
"if",
"len",
"(",
"self",
".",
"bases",
")",
">",
"0",
"else",
"\"\"",
"slots",
"=",
"{",
"\"n\"",
":",
"self",
".",
"name",
",",
"\"b\"",
":",
"bases",
"}",
"if",
"(",
"len",
"(",
"self",
".",
"children",
")",
"==",
"0",
")",
"and",
"(",
"len",
"(",
"self",
".",
"variables",
")",
"==",
"0",
")",
":",
"stub",
".",
"append",
"(",
"\"class %(n)s%(b)s: ...\"",
"%",
"slots",
")",
"else",
":",
"stub",
".",
"append",
"(",
"\"class %(n)s%(b)s:\"",
"%",
"slots",
")",
"super_code",
"=",
"super",
"(",
")",
".",
"get_code",
"(",
")",
"if",
"PY3",
"else",
"StubNode",
".",
"get_code",
"(",
"self",
")",
"for",
"line",
"in",
"super_code",
":",
"stub",
".",
"append",
"(",
"INDENT",
"+",
"line",
")",
"return",
"stub"
] |
Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
|
[
"Get",
"the",
"stub",
"code",
"for",
"this",
"class",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L353-L369
|
train
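Illustrative stub shapes from ClassNode.get_code (class and member names hypothetical): a class with no variables and no children collapses to a single line, otherwise the inherited StubNode code is indented under the header:

# class Empty: ...
#
# class Point(Base):
#     x: int
#     y: int
#     def move(self, dx: int, dy: int) -> None: ...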
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.collect_aliases
|
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
|
python
|
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
|
[
"def",
"collect_aliases",
"(",
"self",
")",
":",
"self",
".",
"aliases",
"=",
"get_aliases",
"(",
"self",
".",
"_code_lines",
")",
"for",
"alias",
",",
"signature",
"in",
"self",
".",
"aliases",
".",
"items",
"(",
")",
":",
"_",
",",
"_",
",",
"requires",
"=",
"parse_signature",
"(",
"signature",
")",
"self",
".",
"required_types",
"|=",
"requires",
"self",
".",
"defined_types",
"|=",
"{",
"alias",
"}"
] |
Collect the type aliases in the source.
:sig: () -> None
|
[
"Collect",
"the",
"type",
"aliases",
"in",
"the",
"source",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L417-L426
|
train

|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_Import
|
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
|
python
|
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
|
[
"def",
"visit_Import",
"(",
"self",
",",
"node",
")",
":",
"line",
"=",
"self",
".",
"_code_lines",
"[",
"node",
".",
"lineno",
"-",
"1",
"]",
"module_name",
"=",
"line",
".",
"split",
"(",
"\"import\"",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"for",
"name",
"in",
"node",
".",
"names",
":",
"imported_name",
"=",
"name",
".",
"name",
"if",
"name",
".",
"asname",
":",
"imported_name",
"=",
"name",
".",
"asname",
"+",
"\"::\"",
"+",
"imported_name",
"self",
".",
"imported_namespaces",
"[",
"imported_name",
"]",
"=",
"module_name"
] |
Visit an import node.
|
[
"Visit",
"an",
"import",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L428-L436
|
train
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_ImportFrom
|
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
|
python
|
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
|
[
"def",
"visit_ImportFrom",
"(",
"self",
",",
"node",
")",
":",
"line",
"=",
"self",
".",
"_code_lines",
"[",
"node",
".",
"lineno",
"-",
"1",
"]",
"module_name",
"=",
"line",
".",
"split",
"(",
"\"from\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
"\"import\"",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"for",
"name",
"in",
"node",
".",
"names",
":",
"imported_name",
"=",
"name",
".",
"name",
"if",
"name",
".",
"asname",
":",
"imported_name",
"=",
"name",
".",
"asname",
"+",
"\"::\"",
"+",
"imported_name",
"self",
".",
"imported_names",
"[",
"imported_name",
"]",
"=",
"module_name"
] |
Visit a from-import node.
|
[
"Visit",
"an",
"from",
"-",
"import",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L438-L446
|
train
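A runnable mini-version of the import bookkeeping above, showing the "asname::name" key encoding (the collector class here is a stand-in, not the repository's StubGenerator):

import ast

class ImportCollector(ast.NodeVisitor):
    def __init__(self, code):
        self._code_lines = code.splitlines()
        self.imported_names = {}

    def visit_ImportFrom(self, node):
        # Same line-based extraction as the record above.
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("from")[1].split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_names[imported_name] = module_name

code = "from os import path as p, sep"
collector = ImportCollector(code)
collector.visit(ast.parse(code))
print(collector.imported_names)  # {'p::path': 'os', 'sep': 'os'}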
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_Assign
|
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
|
python
|
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
|
[
"def",
"visit_Assign",
"(",
"self",
",",
"node",
")",
":",
"line",
"=",
"self",
".",
"_code_lines",
"[",
"node",
".",
"lineno",
"-",
"1",
"]",
"if",
"SIG_COMMENT",
"in",
"line",
":",
"line",
"=",
"_RE_COMMENT_IN_STRING",
".",
"sub",
"(",
"\"\"",
",",
"line",
")",
"if",
"(",
"SIG_COMMENT",
"not",
"in",
"line",
")",
"and",
"(",
"not",
"self",
".",
"generic",
")",
":",
"return",
"if",
"SIG_COMMENT",
"in",
"line",
":",
"_",
",",
"signature",
"=",
"line",
".",
"split",
"(",
"SIG_COMMENT",
")",
"_",
",",
"return_type",
",",
"requires",
"=",
"parse_signature",
"(",
"signature",
")",
"self",
".",
"required_types",
"|=",
"requires",
"parent",
"=",
"self",
".",
"_parents",
"[",
"-",
"1",
"]",
"for",
"var",
"in",
"node",
".",
"targets",
":",
"if",
"isinstance",
"(",
"var",
",",
"ast",
".",
"Name",
")",
":",
"name",
",",
"p",
"=",
"var",
".",
"id",
",",
"parent",
"elif",
"(",
"isinstance",
"(",
"var",
",",
"ast",
".",
"Attribute",
")",
"and",
"isinstance",
"(",
"var",
".",
"value",
",",
"ast",
".",
"Name",
")",
"and",
"(",
"var",
".",
"value",
".",
"id",
"==",
"\"self\"",
")",
")",
":",
"name",
",",
"p",
"=",
"var",
".",
"attr",
",",
"parent",
".",
"parent",
"else",
":",
"name",
",",
"p",
"=",
"None",
",",
"None",
"if",
"name",
"is",
"not",
"None",
":",
"if",
"self",
".",
"generic",
":",
"return_type",
"=",
"\"Any\"",
"self",
".",
"required_types",
".",
"add",
"(",
"return_type",
")",
"stub_node",
"=",
"VariableNode",
"(",
"name",
",",
"return_type",
")",
"p",
".",
"add_variable",
"(",
"stub_node",
")"
] |
Visit an assignment node.
|
[
"Visit",
"an",
"assignment",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L448-L480
|
train
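A hedged illustration of visit_Assign, assuming SIG_COMMENT is the "# sig:" comment marker (its value is not shown in this record):

# Module level:
#     DEBUG = False  # sig: bool
# yields VariableNode("DEBUG", "bool") on the module node.
#
# Inside a method, a self-attribute:
#     self.count = 0  # sig: int
# is attached to the enclosing class via parent.parent instead.
# With generic=True, every recognized target is typed as Any.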
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_FunctionDef
|
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
|
python
|
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
|
[
"def",
"visit_FunctionDef",
"(",
"self",
",",
"node",
")",
":",
"node",
"=",
"self",
".",
"get_function_node",
"(",
"node",
")",
"if",
"node",
"is",
"not",
"None",
":",
"node",
".",
"_async",
"=",
"False"
] |
Visit a function node.
|
[
"Visit",
"a",
"function",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L577-L581
|
train
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_AsyncFunctionDef
|
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
|
python
|
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
|
[
"def",
"visit_AsyncFunctionDef",
"(",
"self",
",",
"node",
")",
":",
"node",
"=",
"self",
".",
"get_function_node",
"(",
"node",
")",
"if",
"node",
"is",
"not",
"None",
":",
"node",
".",
"_async",
"=",
"True"
] |
Visit an async function node.
|
[
"Visit",
"an",
"async",
"function",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L583-L587
|
train
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.visit_ClassDef
|
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
|
python
|
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
|
[
"def",
"visit_ClassDef",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"defined_types",
".",
"add",
"(",
"node",
".",
"name",
")",
"bases",
"=",
"[",
"]",
"for",
"n",
"in",
"node",
".",
"bases",
":",
"base_parts",
"=",
"[",
"]",
"while",
"True",
":",
"if",
"not",
"isinstance",
"(",
"n",
",",
"ast",
".",
"Attribute",
")",
":",
"base_parts",
".",
"append",
"(",
"n",
".",
"id",
")",
"break",
"else",
":",
"base_parts",
".",
"append",
"(",
"n",
".",
"attr",
")",
"n",
"=",
"n",
".",
"value",
"bases",
".",
"append",
"(",
"\".\"",
".",
"join",
"(",
"base_parts",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"self",
".",
"required_types",
"|=",
"set",
"(",
"bases",
")",
"signature",
"=",
"get_signature",
"(",
"node",
")",
"stub_node",
"=",
"ClassNode",
"(",
"node",
".",
"name",
",",
"bases",
"=",
"bases",
",",
"signature",
"=",
"signature",
")",
"self",
".",
"_parents",
"[",
"-",
"1",
"]",
".",
"add_child",
"(",
"stub_node",
")",
"self",
".",
"_parents",
".",
"append",
"(",
"stub_node",
")",
"self",
".",
"generic_visit",
"(",
"node",
")",
"del",
"self",
".",
"_parents",
"[",
"-",
"1",
"]"
] |
Visit a class node.
|
[
"Visit",
"a",
"class",
"node",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L589-L612
|
train
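The while loop above reconstructs dotted base-class names by walking an ast.Attribute chain from the outside in; a self-contained check of the same logic:

import ast

node = ast.parse("class C(pkg.mod.Base, Other): pass").body[0]
bases = []
for n in node.bases:
    base_parts = []
    while True:
        if not isinstance(n, ast.Attribute):
            base_parts.append(n.id)   # reached the leading ast.Name
            break
        base_parts.append(n.attr)     # attributes are collected outside-in
        n = n.value
    bases.append(".".join(base_parts[::-1]))
print(bases)  # ['pkg.mod.Base', 'Other']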
|
uyar/pygenstub
|
pygenstub.py
|
StubGenerator.generate_import_from
|
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
|
python
|
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
|
[
"def",
"generate_import_from",
"(",
"module_",
",",
"names",
")",
":",
"regular_names",
"=",
"[",
"n",
"for",
"n",
"in",
"names",
"if",
"\"::\"",
"not",
"in",
"n",
"]",
"as_names",
"=",
"[",
"n",
"for",
"n",
"in",
"names",
"if",
"\"::\"",
"in",
"n",
"]",
"line",
"=",
"\"\"",
"if",
"len",
"(",
"regular_names",
")",
">",
"0",
":",
"slots",
"=",
"{",
"\"m\"",
":",
"module_",
",",
"\"n\"",
":",
"\", \"",
".",
"join",
"(",
"sorted",
"(",
"regular_names",
")",
")",
"}",
"line",
"=",
"\"from %(m)s import %(n)s\"",
"%",
"slots",
"if",
"len",
"(",
"line",
")",
">",
"LINE_LENGTH_LIMIT",
":",
"slots",
"[",
"\"n\"",
"]",
"=",
"INDENT",
"+",
"(",
"\",\\n\"",
"+",
"INDENT",
")",
".",
"join",
"(",
"sorted",
"(",
"regular_names",
")",
")",
"+",
"\",\"",
"line",
"=",
"\"from %(m)s import (\\n%(n)s\\n)\"",
"%",
"slots",
"if",
"len",
"(",
"as_names",
")",
">",
"0",
":",
"line",
"+=",
"\"\\n\"",
"for",
"as_name",
"in",
"as_names",
":",
"a",
",",
"n",
"=",
"as_name",
".",
"split",
"(",
"\"::\"",
")",
"line",
"+=",
"\"from %(m)s import %(n)s as %(a)s\"",
"%",
"{",
"\"m\"",
":",
"module_",
",",
"\"n\"",
":",
"n",
",",
"\"a\"",
":",
"a",
"}",
"return",
"line"
] |
Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
|
[
"Generate",
"an",
"import",
"line",
"."
] |
a6b18a823382d3c6be29c411fb33c58b6090d22c
|
https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L615-L639
|
train
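Usage sketches for generate_import_from; names carrying the "asname::name" encoding come out as separate "import ... as ..." lines:

print(generate_import_from("typing", {"List", "Optional"}))
# from typing import List, Optional

print(generate_import_from("os", {"sep", "p::path"}))
# from os import sep
# from os import path as p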
|
rochapps/django-csv-exports
|
django_csv_exports/admin.py
|
CSVExportAdmin.has_csv_permission
|
def has_csv_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to export CSV.
Can be overridden by the user in subclasses. By default, we assume
all staff users can use this action unless `DJANGO_EXPORTS_REQUIRE_PERM`
is set to True in your django settings.
"""
if getattr(settings, 'DJANGO_EXPORTS_REQUIRE_PERM', None):
opts = self.opts
codename = '%s_%s' % ('csv', opts.object_name.lower())
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
return True
|
python
|
def has_csv_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to export CSV.
Can be overridden by the user in subclasses. By default, we assume
all staff users can use this action unless `DJANGO_EXPORTS_REQUIRE_PERM`
is set to True in your django settings.
"""
if getattr(settings, 'DJANGO_EXPORTS_REQUIRE_PERM', None):
opts = self.opts
codename = '%s_%s' % ('csv', opts.object_name.lower())
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
return True
|
[
"def",
"has_csv_permission",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"if",
"getattr",
"(",
"settings",
",",
"'DJANGO_EXPORTS_REQUIRE_PERM'",
",",
"None",
")",
":",
"opts",
"=",
"self",
".",
"opts",
"codename",
"=",
"'%s_%s'",
"%",
"(",
"'csv'",
",",
"opts",
".",
"object_name",
".",
"lower",
"(",
")",
")",
"return",
"request",
".",
"user",
".",
"has_perm",
"(",
"\"%s.%s\"",
"%",
"(",
"opts",
".",
"app_label",
",",
"codename",
")",
")",
"return",
"True"
] |
Returns True if the given request has permission to export CSV.
Can be overridden by the user in subclasses. By default, we assume
all staff users can use this action unless `DJANGO_EXPORTS_REQUIRE_PERM`
is set to True in your django settings.
|
[
"Returns",
"True",
"if",
"the",
"given",
"request",
"has",
"permission",
"to",
"add",
"an",
"object",
".",
"Can",
"be",
"overridden",
"by",
"the",
"user",
"in",
"subclasses",
".",
"By",
"default",
"we",
"assume",
"all",
"staff",
"users",
"can",
"use",
"this",
"action",
"unless",
"DJANGO_EXPORTS_REQUIRE_PERM",
"is",
"set",
"to",
"True",
"in",
"your",
"django",
"settings",
"."
] |
efcdde401d66f38a64b37afa909bfc16a6c21e9e
|
https://github.com/rochapps/django-csv-exports/blob/efcdde401d66f38a64b37afa909bfc16a6c21e9e/django_csv_exports/admin.py#L54-L65
|
train
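A hedged configuration sketch: with the settings flag enabled, the permission string is built as "<app_label>.csv_<lowercased model name>":

# settings.py
DJANGO_EXPORTS_REQUIRE_PERM = True

# For a model Invoice in the app "billing", the check above amounts to:
#     request.user.has_perm("billing.csv_invoice")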
|
zhemao/funktown
|
funktown/dictionary.py
|
ImmutableDict.assoc
|
def assoc(self, key, value):
'''Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.'''
copydict = ImmutableDict()
copydict.tree = self.tree.assoc(hash(key), (key, value))
copydict._length = self._length + 1
return copydict
|
python
|
def assoc(self, key, value):
'''Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.'''
copydict = ImmutableDict()
copydict.tree = self.tree.assoc(hash(key), (key, value))
copydict._length = self._length + 1
return copydict
|
[
"def",
"assoc",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"copydict",
"=",
"ImmutableDict",
"(",
")",
"copydict",
".",
"tree",
"=",
"self",
".",
"tree",
".",
"assoc",
"(",
"hash",
"(",
"key",
")",
",",
"(",
"key",
",",
"value",
")",
")",
"copydict",
".",
"_length",
"=",
"self",
".",
"_length",
"+",
"1",
"return",
"copydict"
] |
Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.
|
[
"Returns",
"a",
"new",
"ImmutableDict",
"instance",
"with",
"value",
"associated",
"with",
"key",
".",
"The",
"implicit",
"parameter",
"is",
"not",
"modified",
"."
] |
8d5c5a8bdad2b85b33b4cea3febd820c2657c375
|
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L22-L28
|
train
|
zhemao/funktown
|
funktown/dictionary.py
|
ImmutableDict.update
|
def update(self, other=None, **kwargs):
'''Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.'''
copydict = ImmutableDict()
if other:
vallist = [(hash(key), (key, other[key])) for key in other]
else: vallist = []
if kwargs:
vallist += [(hash(key), (key, kwargs[key])) for key in kwargs]
copydict.tree = self.tree.multi_assoc(vallist)
copydict._length = iter_length(copydict.tree)
return copydict
|
python
|
def update(self, other=None, **kwargs):
'''Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.'''
copydict = ImmutableDict()
if other:
vallist = [(hash(key), (key, other[key])) for key in other]
else: vallist = []
if kwargs:
vallist += [(hash(key), (key, kwargs[key])) for key in kwargs]
copydict.tree = self.tree.multi_assoc(vallist)
copydict._length = iter_length(copydict.tree)
return copydict
|
[
"def",
"update",
"(",
"self",
",",
"other",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"copydict",
"=",
"ImmutableDict",
"(",
")",
"if",
"other",
":",
"vallist",
"=",
"[",
"(",
"hash",
"(",
"key",
")",
",",
"(",
"key",
",",
"other",
"[",
"key",
"]",
")",
")",
"for",
"key",
"in",
"other",
"]",
"else",
":",
"vallist",
"=",
"[",
"]",
"if",
"kwargs",
":",
"vallist",
"+=",
"[",
"(",
"hash",
"(",
"key",
")",
",",
"(",
"key",
",",
"kwargs",
"[",
"key",
"]",
")",
")",
"for",
"key",
"in",
"kwargs",
"]",
"copydict",
".",
"tree",
"=",
"self",
".",
"tree",
".",
"multi_assoc",
"(",
"vallist",
")",
"copydict",
".",
"_length",
"=",
"iter_length",
"(",
"copydict",
".",
"tree",
")",
"return",
"copydict"
] |
Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.
|
[
"Takes",
"the",
"same",
"arguments",
"as",
"the",
"update",
"method",
"in",
"the",
"builtin",
"dict",
"class",
".",
"However",
"this",
"version",
"returns",
"a",
"new",
"ImmutableDict",
"instead",
"of",
"modifying",
"in",
"-",
"place",
"."
] |
8d5c5a8bdad2b85b33b4cea3febd820c2657c375
|
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L30-L42
|
train
|
zhemao/funktown
|
funktown/dictionary.py
|
ImmutableDict.remove
|
def remove(self, key):
'''Returns a new ImmutableDict with the given key removed.'''
copydict = ImmutableDict()
copydict.tree = self.tree.remove(hash(key))
copydict._length = self._length - 1
return copydict
|
python
|
def remove(self, key):
'''Returns a new ImmutableDict with the given key removed.'''
copydict = ImmutableDict()
copydict.tree = self.tree.remove(hash(key))
copydict._length = self._length - 1
return copydict
|
[
"def",
"remove",
"(",
"self",
",",
"key",
")",
":",
"copydict",
"=",
"ImmutableDict",
"(",
")",
"copydict",
".",
"tree",
"=",
"self",
".",
"tree",
".",
"remove",
"(",
"hash",
"(",
"key",
")",
")",
"copydict",
".",
"_length",
"=",
"self",
".",
"_length",
"-",
"1",
"return",
"copydict"
] |
Returns a new ImmutableDict with the given key removed.
|
[
"Returns",
"a",
"new",
"ImmutableDict",
"with",
"the",
"given",
"key",
"removed",
"."
] |
8d5c5a8bdad2b85b33b4cea3febd820c2657c375
|
https://github.com/zhemao/funktown/blob/8d5c5a8bdad2b85b33b4cea3febd820c2657c375/funktown/dictionary.py#L44-L49
|
train
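A usage sketch tying the three ImmutableDict records together; this assumes the no-argument constructor yields an empty dict and that len() reflects _length (neither is shown in these records):

d1 = ImmutableDict()
d2 = d1.update({"a": 1, "b": 2})   # new dict; d1 is unchanged
d3 = d2.assoc("c", 3)
d4 = d3.remove("a")
print(len(d2), len(d3), len(d4))   # 2 3 2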
|
thiagokokada/livedumper
|
src/livedumper/dumper.py
|
LivestreamerDumper._load_config
|
def _load_config(self):
"Load and parse config file, pass options to livestreamer"
config = SafeConfigParser()
config_file = os.path.join(self.config_path, 'settings.ini')
config.read(config_file)
for option, type in list(AVAILABLE_OPTIONS.items()):
if config.has_option('DEFAULT', option):
if type == 'int':
value = config.getint('DEFAULT', option)
if type == 'float':
value = config.getfloat('DEFAULT', option)
if type == 'bool':
value = config.getboolean('DEFAULT', option)
if type == 'str':
value = config.get('DEFAULT', option)
self.livestreamer.set_option(option, value)
|
python
|
def _load_config(self):
"Load and parse config file, pass options to livestreamer"
config = SafeConfigParser()
config_file = os.path.join(self.config_path, 'settings.ini')
config.read(config_file)
for option, type in list(AVAILABLE_OPTIONS.items()):
if config.has_option('DEFAULT', option):
if type == 'int':
value = config.getint('DEFAULT', option)
if type == 'float':
value = config.getfloat('DEFAULT', option)
if type == 'bool':
value = config.getboolean('DEFAULT', option)
if type == 'str':
value = config.get('DEFAULT', option)
self.livestreamer.set_option(option, value)
|
[
"def",
"_load_config",
"(",
"self",
")",
":",
"config",
"=",
"SafeConfigParser",
"(",
")",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config_path",
",",
"'settings.ini'",
")",
"config",
".",
"read",
"(",
"config_file",
")",
"for",
"option",
",",
"type",
"in",
"list",
"(",
"AVAILABLE_OPTIONS",
".",
"items",
"(",
")",
")",
":",
"if",
"config",
".",
"has_option",
"(",
"'DEFAULT'",
",",
"option",
")",
":",
"if",
"type",
"==",
"'int'",
":",
"value",
"=",
"config",
".",
"getint",
"(",
"'DEFAULT'",
",",
"option",
")",
"if",
"type",
"==",
"'float'",
":",
"value",
"=",
"config",
".",
"getfloat",
"(",
"'DEFAULT'",
",",
"option",
")",
"if",
"type",
"==",
"'bool'",
":",
"value",
"=",
"config",
".",
"getboolean",
"(",
"'DEFAULT'",
",",
"option",
")",
"if",
"type",
"==",
"'str'",
":",
"value",
"=",
"config",
".",
"get",
"(",
"'DEFAULT'",
",",
"option",
")",
"self",
".",
"livestreamer",
".",
"set_option",
"(",
"option",
",",
"value",
")"
] |
Load and parse config file, pass options to livestreamer
|
[
"Load",
"and",
"parse",
"config",
"file",
"pass",
"options",
"to",
"livestreamer"
] |
f6441283269b4a602cafea3be5cda9446fc64005
|
https://github.com/thiagokokada/livedumper/blob/f6441283269b4a602cafea3be5cda9446fc64005/src/livedumper/dumper.py#L116-L134
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/text.py
|
PrototypeCtsNode.urn
|
def urn(self, value: Union[URN, str]):
""" Set the urn
:param value: URN to be saved
:raises: *TypeError* when the value is not URN compatible
"""
if isinstance(value, str):
value = URN(value)
elif not isinstance(value, URN):
raise TypeError("New urn must be string or {} instead of {}".format(type(URN), type(value)))
self._urn = value
|
python
|
def urn(self, value: Union[URN, str]):
""" Set the urn
:param value: URN to be saved
:raises: *TypeError* when the value is not URN compatible
"""
if isinstance(value, str):
value = URN(value)
elif not isinstance(value, URN):
raise TypeError("New urn must be string or {} instead of {}".format(type(URN), type(value)))
self._urn = value
|
[
"def",
"urn",
"(",
"self",
",",
"value",
":",
"Union",
"[",
"URN",
",",
"str",
"]",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"URN",
"(",
"value",
")",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"URN",
")",
":",
"raise",
"TypeError",
"(",
"\"New urn must be string or {} instead of {}\"",
".",
"format",
"(",
"type",
"(",
"URN",
")",
",",
"type",
"(",
"value",
")",
")",
")",
"self",
".",
"_urn",
"=",
"value"
] |
Set the urn
:param value: URN to be saved
:raises: *TypeError* when the value is not URN compatible
|
[
"Set",
"the",
"urn"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L55-L66
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/text.py
|
PrototypeCtsNode.get_cts_metadata
|
def get_cts_metadata(self, key: str, lang: str = None) -> Literal:
""" Get easily a metadata from the CTS namespace
:param key: CTS property to retrieve
:param lang: Language in which it should be
:return: Literal value of the CTS graph property
"""
return self.metadata.get_single(RDF_NAMESPACES.CTS.term(key), lang)
|
python
|
def get_cts_metadata(self, key: str, lang: str = None) -> Literal:
""" Get easily a metadata from the CTS namespace
:param key: CTS property to retrieve
:param lang: Language in which it should be
:return: Literal value of the CTS graph property
"""
return self.metadata.get_single(RDF_NAMESPACES.CTS.term(key), lang)
|
[
"def",
"get_cts_metadata",
"(",
"self",
",",
"key",
":",
"str",
",",
"lang",
":",
"str",
"=",
"None",
")",
"->",
"Literal",
":",
"return",
"self",
".",
"metadata",
".",
"get_single",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"term",
"(",
"key",
")",
",",
"lang",
")"
] |
Get easily a metadata from the CTS namespace
:param key: CTS property to retrieve
:param lang: Language in which it should be
:return: Literal value of the CTS graph property
|
[
"Get",
"easily",
"a",
"metadata",
"from",
"the",
"CTS",
"namespace"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L68-L75
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/text.py
|
PrototypeCtsNode.set_metadata_from_collection
|
def set_metadata_from_collection(self, text_metadata: CtsTextMetadata):
""" Set the object metadata using its collections recursively
:param text_metadata: Object representing the current text as a collection
:type text_metadata: CtsEditionMetadata or CtsTranslationMetadata
"""
edition, work, textgroup = tuple(([text_metadata] + text_metadata.parents)[:3])
for node in textgroup.metadata.get(RDF_NAMESPACES.CTS.groupname):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.groupname, lang=lang, value=str(node))
self.set_creator(str(node), lang)
for node in work.metadata.get(RDF_NAMESPACES.CTS.title):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.title, lang=lang, value=str(node))
self.set_title(str(node), lang)
for node in edition.metadata.get(RDF_NAMESPACES.CTS.label):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.label, lang=lang, value=str(node))
self.set_subject(str(node), lang)
for node in edition.metadata.get(RDF_NAMESPACES.CTS.description):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.description, lang=lang, value=str(node))
self.set_description(str(node), lang)
if not self.citation.is_set() and edition.citation.is_set():
self.citation = edition.citation
|
python
|
def set_metadata_from_collection(self, text_metadata: CtsTextMetadata):
""" Set the object metadata using its collections recursively
:param text_metadata: Object representing the current text as a collection
:type text_metadata: CtsEditionMetadata or CtsTranslationMetadata
"""
edition, work, textgroup = tuple(([text_metadata] + text_metadata.parents)[:3])
for node in textgroup.metadata.get(RDF_NAMESPACES.CTS.groupname):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.groupname, lang=lang, value=str(node))
self.set_creator(str(node), lang)
for node in work.metadata.get(RDF_NAMESPACES.CTS.title):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.title, lang=lang, value=str(node))
self.set_title(str(node), lang)
for node in edition.metadata.get(RDF_NAMESPACES.CTS.label):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.label, lang=lang, value=str(node))
self.set_subject(str(node), lang)
for node in edition.metadata.get(RDF_NAMESPACES.CTS.description):
lang = node.language
self.metadata.add(RDF_NAMESPACES.CTS.description, lang=lang, value=str(node))
self.set_description(str(node), lang)
if not self.citation.is_set() and edition.citation.is_set():
self.citation = edition.citation
|
[
"def",
"set_metadata_from_collection",
"(",
"self",
",",
"text_metadata",
":",
"CtsTextMetadata",
")",
":",
"edition",
",",
"work",
",",
"textgroup",
"=",
"tuple",
"(",
"(",
"[",
"text_metadata",
"]",
"+",
"text_metadata",
".",
"parents",
")",
"[",
":",
"3",
"]",
")",
"for",
"node",
"in",
"textgroup",
".",
"metadata",
".",
"get",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"groupname",
")",
":",
"lang",
"=",
"node",
".",
"language",
"self",
".",
"metadata",
".",
"add",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"groupname",
",",
"lang",
"=",
"lang",
",",
"value",
"=",
"str",
"(",
"node",
")",
")",
"self",
".",
"set_creator",
"(",
"str",
"(",
"node",
")",
",",
"lang",
")",
"for",
"node",
"in",
"work",
".",
"metadata",
".",
"get",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"title",
")",
":",
"lang",
"=",
"node",
".",
"language",
"self",
".",
"metadata",
".",
"add",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"title",
",",
"lang",
"=",
"lang",
",",
"value",
"=",
"str",
"(",
"node",
")",
")",
"self",
".",
"set_title",
"(",
"str",
"(",
"node",
")",
",",
"lang",
")",
"for",
"node",
"in",
"edition",
".",
"metadata",
".",
"get",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"label",
")",
":",
"lang",
"=",
"node",
".",
"language",
"self",
".",
"metadata",
".",
"add",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"label",
",",
"lang",
"=",
"lang",
",",
"value",
"=",
"str",
"(",
"node",
")",
")",
"self",
".",
"set_subject",
"(",
"str",
"(",
"node",
")",
",",
"lang",
")",
"for",
"node",
"in",
"edition",
".",
"metadata",
".",
"get",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"description",
")",
":",
"lang",
"=",
"node",
".",
"language",
"self",
".",
"metadata",
".",
"add",
"(",
"RDF_NAMESPACES",
".",
"CTS",
".",
"description",
",",
"lang",
"=",
"lang",
",",
"value",
"=",
"str",
"(",
"node",
")",
")",
"self",
".",
"set_description",
"(",
"str",
"(",
"node",
")",
",",
"lang",
")",
"if",
"not",
"self",
".",
"citation",
".",
"is_set",
"(",
")",
"and",
"edition",
".",
"citation",
".",
"is_set",
"(",
")",
":",
"self",
".",
"citation",
"=",
"edition",
".",
"citation"
] |
Set the object metadata using its collections recursively
:param text_metadata: Object representing the current text as a collection
:type text_metadata: CtsEditionMetadata or CtsTranslationMetadata
|
[
"Set",
"the",
"object",
"metadata",
"using",
"its",
"collections",
"recursively"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/text.py#L94-L123
|
train
|
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
create_datapoint
|
def create_datapoint(value, timestamp=None, **tags):
"""
Creates a single datapoint dict with a value, timestamp and tags.
:param value: Value of the datapoint. Type depends on the id's MetricType
:param timestamp: Optional timestamp of the datapoint. Uses client current time if not set. Millisecond accuracy. Can be datetime instance also.
:param tags: Optional datapoint tags. Not to be confused with metric definition tags
"""
if timestamp is None:
timestamp = time_millis()
if type(timestamp) is datetime:
timestamp = datetime_to_time_millis(timestamp)
item = { 'timestamp': timestamp,
'value': value }
    if tags:
item['tags'] = tags
return item
|
python
|
def create_datapoint(value, timestamp=None, **tags):
"""
Creates a single datapoint dict with a value, timestamp and tags.
:param value: Value of the datapoint. Type depends on the id's MetricType
:param timestamp: Optional timestamp of the datapoint. Uses client current time if not set. Millisecond accuracy. Can be datetime instance also.
:param tags: Optional datapoint tags. Not to be confused with metric definition tags
"""
if timestamp is None:
timestamp = time_millis()
if type(timestamp) is datetime:
timestamp = datetime_to_time_millis(timestamp)
item = { 'timestamp': timestamp,
'value': value }
    if tags:
item['tags'] = tags
return item
|
[
"def",
"create_datapoint",
"(",
"value",
",",
"timestamp",
"=",
"None",
",",
"*",
"*",
"tags",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"timestamp",
"=",
"time_millis",
"(",
")",
"if",
"type",
"(",
"timestamp",
")",
"is",
"datetime",
":",
"timestamp",
"=",
"datetime_to_time_millis",
"(",
"timestamp",
")",
"item",
"=",
"{",
"'timestamp'",
":",
"timestamp",
",",
"'value'",
":",
"value",
"}",
"if",
"tags",
"is",
"not",
"None",
":",
"item",
"[",
"'tags'",
"]",
"=",
"tags",
"return",
"item"
] |
Creates a single datapoint dict with a value, timestamp and tags.
:param value: Value of the datapoint. Type depends on the id's MetricType
:param timestamp: Optional timestamp of the datapoint. Uses client current time if not set. Millisecond accuracy. Can be datetime instance also.
:param tags: Optional datapoint tags. Not to be confused with metric definition tags
|
[
"Creates",
"a",
"single",
"datapoint",
"dict",
"with",
"a",
"value",
"timestamp",
"and",
"tags",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L362-L382
|
train
|
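A short sketch of building datapoints with and without an explicit timestamp; the metric value and tag are hypothetical, and the import path follows the hawkular/metrics.py module shown above:

```python
from datetime import datetime
from hawkular.metrics import create_datapoint

dp_now = create_datapoint(0.42)  # timestamp defaults to client time in ms
dp_then = create_datapoint(0.42, datetime(2023, 1, 1, 12, 0), host='web-1')

print(dp_then['tags'])  # {'host': 'web-1'}
```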
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
create_metric
|
def create_metric(metric_type, metric_id, data):
"""
Create Hawkular-Metrics' submittable structure.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param data: A datapoint or a list of datapoints created with create_datapoint(value, timestamp, tags)
"""
if not isinstance(data, list):
data = [data]
return { 'type': metric_type,'id': metric_id, 'data': data }
|
python
|
def create_metric(metric_type, metric_id, data):
"""
Create Hawkular-Metrics' submittable structure.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param data: A datapoint or a list of datapoints created with create_datapoint(value, timestamp, tags)
"""
if not isinstance(data, list):
data = [data]
return { 'type': metric_type,'id': metric_id, 'data': data }
|
[
"def",
"create_metric",
"(",
"metric_type",
",",
"metric_id",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"[",
"data",
"]",
"return",
"{",
"'type'",
":",
"metric_type",
",",
"'id'",
":",
"metric_id",
",",
"'data'",
":",
"data",
"}"
] |
Create Hawkular-Metrics' submittable structure.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param data: A datapoint or a list of datapoints created with create_datapoint(value, timestamp, tags)
|
[
"Create",
"Hawkular",
"-",
"Metrics",
"submittable",
"structure",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L384-L395
|
train
|
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.put
|
def put(self, data):
"""
Send multiple different metric_ids to the server in a single batch. Metrics can be a mixture
of types.
:param data: A dict or a list of dicts created with create_metric(metric_type, metric_id, datapoints)
"""
if not isinstance(data, list):
data = [data]
r = collections.defaultdict(list)
for d in data:
metric_type = d.pop('type', None)
if metric_type is None:
raise HawkularError('Undefined MetricType')
r[metric_type].append(d)
        # This isn't transactional, but... oh well. One can always repost everything.
for l in r:
self._post(self._get_metrics_raw_url(self._get_url(l)), r[l],parse_json=False)
|
python
|
def put(self, data):
"""
Send multiple different metric_ids to the server in a single batch. Metrics can be a mixture
of types.
:param data: A dict or a list of dicts created with create_metric(metric_type, metric_id, datapoints)
"""
if not isinstance(data, list):
data = [data]
r = collections.defaultdict(list)
for d in data:
metric_type = d.pop('type', None)
if metric_type is None:
raise HawkularError('Undefined MetricType')
r[metric_type].append(d)
        # This isn't transactional, but... oh well. One can always repost everything.
for l in r:
self._post(self._get_metrics_raw_url(self._get_url(l)), r[l],parse_json=False)
|
[
"def",
"put",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"[",
"data",
"]",
"r",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"d",
"in",
"data",
":",
"metric_type",
"=",
"d",
".",
"pop",
"(",
"'type'",
",",
"None",
")",
"if",
"metric_type",
"is",
"None",
":",
"raise",
"HawkularError",
"(",
"'Undefined MetricType'",
")",
"r",
"[",
"metric_type",
"]",
".",
"append",
"(",
"d",
")",
"# This isn't transactional, but .. ouh well. One can always repost everything.",
"for",
"l",
"in",
"r",
":",
"self",
".",
"_post",
"(",
"self",
".",
"_get_metrics_raw_url",
"(",
"self",
".",
"_get_url",
"(",
"l",
")",
")",
",",
"r",
"[",
"l",
"]",
",",
"parse_json",
"=",
"False",
")"
] |
Send multiple different metric_ids to the server in a single batch. Metrics can be a mixture
of types.
:param data: A dict or a list of dicts created with create_metric(metric_type, metric_id, datapoints)
|
[
"Send",
"multiple",
"different",
"metric_ids",
"to",
"the",
"server",
"in",
"a",
"single",
"batch",
".",
"Metrics",
"can",
"be",
"a",
"mixture",
"of",
"types",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L109-L129
|
train
|
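A hedged end-to-end sketch of batching mixed metric types through put; the host, tenant and metric ids are hypothetical, and it assumes a reachable Hawkular-Metrics server and that MetricType exposes Gauge and Counter members:

```python
from hawkular.metrics import (HawkularMetricsClient, MetricType,
                              create_metric, create_datapoint)

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
batch = [
    create_metric(MetricType.Gauge, 'cpu.load', create_datapoint(0.42)),
    create_metric(MetricType.Counter, 'requests.total', create_datapoint(1500)),
]
client.put(batch)  # one POST per metric type, as noted in the comment above
```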
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.push
|
def push(self, metric_type, metric_id, value, timestamp=None):
"""
Pushes a single metric_id, datapoint combination to the server.
        This is a convenience wrapper for the put method, removing the need to
        create the data structures first.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param value: Datapoint value (depending on the MetricType)
:param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance
"""
if type(timestamp) is datetime:
timestamp = datetime_to_time_millis(timestamp)
item = create_metric(metric_type, metric_id, create_datapoint(value, timestamp))
self.put(item)
|
python
|
def push(self, metric_type, metric_id, value, timestamp=None):
"""
Pushes a single metric_id, datapoint combination to the server.
        This is a convenience wrapper for the put method, removing the need to
        create the data structures first.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param value: Datapoint value (depending on the MetricType)
:param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance
"""
if type(timestamp) is datetime:
timestamp = datetime_to_time_millis(timestamp)
item = create_metric(metric_type, metric_id, create_datapoint(value, timestamp))
self.put(item)
|
[
"def",
"push",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
",",
"value",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"type",
"(",
"timestamp",
")",
"is",
"datetime",
":",
"timestamp",
"=",
"datetime_to_time_millis",
"(",
"timestamp",
")",
"item",
"=",
"create_metric",
"(",
"metric_type",
",",
"metric_id",
",",
"create_datapoint",
"(",
"value",
",",
"timestamp",
")",
")",
"self",
".",
"put",
"(",
"item",
")"
] |
Pushes a single metric_id, datapoint combination to the server.
This is a convenience wrapper for the put method, removing the need to
create the data structures first.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param value: Datapoint value (depending on the MetricType)
:param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance
|
[
"Pushes",
"a",
"single",
"metric_id",
"datapoint",
"combination",
"to",
"the",
"server",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L131-L147
|
train
|
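The same write expressed through the push convenience wrapper, avoiding the intermediate dicts; the client, metric id and values are hypothetical, as above:

```python
from datetime import datetime
from hawkular.metrics import HawkularMetricsClient, MetricType

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
client.push(MetricType.Gauge, 'cpu.load', 0.37)  # stamped with client time
client.push(MetricType.Gauge, 'cpu.load', 0.41, datetime(2023, 1, 1, 12, 5))
```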
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_metric
|
def query_metric(self, metric_type, metric_id, start=None, end=None, **query_options):
"""
Query for metrics datapoints from the server.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
return self._get(
self._get_metrics_raw_url(
self._get_metrics_single_url(metric_type, metric_id)),
**query_options)
|
python
|
def query_metric(self, metric_type, metric_id, start=None, end=None, **query_options):
"""
Query for metrics datapoints from the server.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
return self._get(
self._get_metrics_raw_url(
self._get_metrics_single_url(metric_type, metric_id)),
**query_options)
|
[
"def",
"query_metric",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"*",
"*",
"query_options",
")",
":",
"if",
"start",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"start",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"datetime_to_time_millis",
"(",
"start",
")",
"else",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"start",
"if",
"end",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"end",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"datetime_to_time_millis",
"(",
"end",
")",
"else",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"end",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"_get_metrics_raw_url",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")",
",",
"*",
"*",
"query_options",
")"
] |
Query for metrics datapoints from the server.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
|
[
"Query",
"for",
"metrics",
"datapoints",
"from",
"the",
"server",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L149-L174
|
train
|
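A sketch of a raw-datapoint query over a datetime range; the extra limit keyword only illustrates **query_options and would need to match the server's documented parameters:

```python
from datetime import datetime, timedelta
from hawkular.metrics import HawkularMetricsClient, MetricType

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
end = datetime.utcnow()
start = end - timedelta(hours=1)  # converted to epoch milliseconds internally

datapoints = client.query_metric(MetricType.Gauge, 'cpu.load',
                                 start=start, end=end, limit=100)
```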
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_metric_stats
|
def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
        :param bucketDuration: The timedelta or duration of buckets. Can be a string representation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
            if len(query_options) == 0:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options)
|
python
|
def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
        :param bucketDuration: The timedelta or duration of buckets. Can be a string representation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
            if len(query_options) == 0:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options)
|
[
"def",
"query_metric_stats",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"bucketDuration",
"=",
"None",
",",
"*",
"*",
"query_options",
")",
":",
"if",
"start",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"start",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"datetime_to_time_millis",
"(",
"start",
")",
"else",
":",
"query_options",
"[",
"'start'",
"]",
"=",
"start",
"if",
"end",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"end",
")",
"is",
"datetime",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"datetime_to_time_millis",
"(",
"end",
")",
"else",
":",
"query_options",
"[",
"'end'",
"]",
"=",
"end",
"if",
"bucketDuration",
"is",
"not",
"None",
":",
"if",
"type",
"(",
"bucketDuration",
")",
"is",
"timedelta",
":",
"query_options",
"[",
"'bucketDuration'",
"]",
"=",
"timedelta_to_duration",
"(",
"bucketDuration",
")",
"else",
":",
"query_options",
"[",
"'bucketDuration'",
"]",
"=",
"bucketDuration",
"if",
"metric_id",
"is",
"not",
"None",
":",
"url",
"=",
"self",
".",
"_get_metrics_stats_url",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")",
"else",
":",
"if",
"len",
"(",
"query_options",
")",
"<",
"0",
":",
"raise",
"HawkularError",
"(",
"'Tags are required when querying without metric_id'",
")",
"url",
"=",
"self",
".",
"_get_metrics_stats_url",
"(",
"self",
".",
"_get_url",
"(",
"metric_type",
")",
")",
"return",
"self",
".",
"_get",
"(",
"url",
",",
"*",
"*",
"query_options",
")"
] |
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
|
[
"Query",
"for",
"metric",
"aggregates",
"from",
"the",
"server",
".",
"This",
"is",
"called",
"buckets",
"in",
"the",
"Hawkular",
"-",
"Metrics",
"documentation",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L176-L212
|
train
|
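A sketch of a bucketed (stats) query with a timedelta bucket size; the client, metric id and time window are hypothetical:

```python
from datetime import datetime, timedelta
from hawkular.metrics import HawkularMetricsClient, MetricType

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
# Five-minute buckets over the last hour; the timedelta is converted to a
# Hawkular duration string internally.
buckets = client.query_metric_stats(
    MetricType.Gauge, 'cpu.load',
    start=datetime.utcnow() - timedelta(hours=1),
    bucketDuration=timedelta(minutes=5))
```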
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_metric_definition
|
def query_metric_definition(self, metric_type, metric_id):
"""
Query definition of a single metric id.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
"""
return self._get(self._get_metrics_single_url(metric_type, metric_id))
|
python
|
def query_metric_definition(self, metric_type, metric_id):
"""
Query definition of a single metric id.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
"""
return self._get(self._get_metrics_single_url(metric_type, metric_id))
|
[
"def",
"query_metric_definition",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
")",
":",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")"
] |
Query definition of a single metric id.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
|
[
"Query",
"definition",
"of",
"a",
"single",
"metric",
"id",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L214-L221
|
train
|
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_metric_definitions
|
def query_metric_definitions(self, metric_type=None, id_filter=None, **tags):
"""
Query available metric definitions.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
        :param id_filter: Filter the id with regexp if tag filtering is used, otherwise a list of exact metric ids
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
"""
params = {}
if id_filter is not None:
params['id'] = id_filter
if metric_type is not None:
params['type'] = MetricType.short(metric_type)
if len(tags) > 0:
params['tags'] = self._transform_tags(**tags)
return self._get(self._get_url(), **params)
|
python
|
def query_metric_definitions(self, metric_type=None, id_filter=None, **tags):
"""
Query available metric definitions.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
        :param id_filter: Filter the id with regexp if tag filtering is used, otherwise a list of exact metric ids
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
"""
params = {}
if id_filter is not None:
params['id'] = id_filter
if metric_type is not None:
params['type'] = MetricType.short(metric_type)
if len(tags) > 0:
params['tags'] = self._transform_tags(**tags)
return self._get(self._get_url(), **params)
|
[
"def",
"query_metric_definitions",
"(",
"self",
",",
"metric_type",
"=",
"None",
",",
"id_filter",
"=",
"None",
",",
"*",
"*",
"tags",
")",
":",
"params",
"=",
"{",
"}",
"if",
"id_filter",
"is",
"not",
"None",
":",
"params",
"[",
"'id'",
"]",
"=",
"id_filter",
"if",
"metric_type",
"is",
"not",
"None",
":",
"params",
"[",
"'type'",
"]",
"=",
"MetricType",
".",
"short",
"(",
"metric_type",
")",
"if",
"len",
"(",
"tags",
")",
">",
"0",
":",
"params",
"[",
"'tags'",
"]",
"=",
"self",
".",
"_transform_tags",
"(",
"*",
"*",
"tags",
")",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"_get_url",
"(",
")",
",",
"*",
"*",
"params",
")"
] |
Query available metric definitions.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param id_filter: Filter the id with regexp if tag filtering is used, otherwise a list of exact metric ids
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
|
[
"Query",
"available",
"metric",
"definitions",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L223-L242
|
train
|
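A sketch of definition discovery by id regexp and tag filter; the hostname tag is hypothetical and follows the Hawkular tag query language mentioned in the docstring:

```python
from hawkular.metrics import HawkularMetricsClient, MetricType

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
definitions = client.query_metric_definitions(
    metric_type=MetricType.Gauge, id_filter='cpu.*', hostname='web-.*')
```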
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_tag_values
|
def query_tag_values(self, metric_type=None, **tags):
"""
Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
"""
tagql = self._transform_tags(**tags)
return self._get(self._get_metrics_tags_url(self._get_url(metric_type)) + '/{}'.format(tagql))
|
python
|
def query_tag_values(self, metric_type=None, **tags):
"""
Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
"""
tagql = self._transform_tags(**tags)
return self._get(self._get_metrics_tags_url(self._get_url(metric_type)) + '/{}'.format(tagql))
|
[
"def",
"query_tag_values",
"(",
"self",
",",
"metric_type",
"=",
"None",
",",
"*",
"*",
"tags",
")",
":",
"tagql",
"=",
"self",
".",
"_transform_tags",
"(",
"*",
"*",
"tags",
")",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"_get_metrics_tags_url",
"(",
"self",
".",
"_get_url",
"(",
"metric_type",
")",
")",
"+",
"'/{}'",
".",
"format",
"(",
"tagql",
")",
")"
] |
Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
|
[
"Query",
"for",
"possible",
"tag",
"values",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L244-L253
|
train
|
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.query_metric_tags
|
def query_metric_tags(self, metric_type, metric_id):
"""
Returns a list of tags in the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
"""
definition = self._get(self._get_metrics_tags_url(self._get_metrics_single_url(metric_type, metric_id)))
return definition
|
python
|
def query_metric_tags(self, metric_type, metric_id):
"""
Returns a list of tags in the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
"""
definition = self._get(self._get_metrics_tags_url(self._get_metrics_single_url(metric_type, metric_id)))
return definition
|
[
"def",
"query_metric_tags",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
")",
":",
"definition",
"=",
"self",
".",
"_get",
"(",
"self",
".",
"_get_metrics_tags_url",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")",
")",
"return",
"definition"
] |
Returns a list of tags in the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
|
[
"Returns",
"a",
"list",
"of",
"tags",
"in",
"the",
"metric",
"definition",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L284-L292
|
train
|
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.delete_metric_tags
|
def delete_metric_tags(self, metric_type, metric_id, **deleted_tags):
"""
Delete one or more tags from the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param deleted_tags: List of deleted tag names. Values can be set to anything
"""
tags = self._transform_tags(**deleted_tags)
tags_url = self._get_metrics_tags_url(self._get_metrics_single_url(metric_type, metric_id)) + '/{0}'.format(tags)
self._delete(tags_url)
|
python
|
def delete_metric_tags(self, metric_type, metric_id, **deleted_tags):
"""
Delete one or more tags from the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param deleted_tags: List of deleted tag names. Values can be set to anything
"""
tags = self._transform_tags(**deleted_tags)
tags_url = self._get_metrics_tags_url(self._get_metrics_single_url(metric_type, metric_id)) + '/{0}'.format(tags)
self._delete(tags_url)
|
[
"def",
"delete_metric_tags",
"(",
"self",
",",
"metric_type",
",",
"metric_id",
",",
"*",
"*",
"deleted_tags",
")",
":",
"tags",
"=",
"self",
".",
"_transform_tags",
"(",
"*",
"*",
"deleted_tags",
")",
"tags_url",
"=",
"self",
".",
"_get_metrics_tags_url",
"(",
"self",
".",
"_get_metrics_single_url",
"(",
"metric_type",
",",
"metric_id",
")",
")",
"+",
"'/{0}'",
".",
"format",
"(",
"tags",
")",
"self",
".",
"_delete",
"(",
"tags_url",
")"
] |
Delete one or more tags from the metric definition.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param deleted_tags: List of deleted tag names. Values can be set to anything
|
[
"Delete",
"one",
"or",
"more",
"tags",
"from",
"the",
"metric",
"definition",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L304-L315
|
train
|
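A sketch of tag deletion; per the docstring only the tag names matter, so the values passed here are placeholders:

```python
from hawkular.metrics import HawkularMetricsClient, MetricType

client = HawkularMetricsClient(tenant_id='demo', host='localhost', port=8080)
client.delete_metric_tags(MetricType.Gauge, 'cpu.load', hostname='', env='')
```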
hawkular/hawkular-client-python
|
hawkular/metrics.py
|
HawkularMetricsClient.create_tenant
|
def create_tenant(self, tenant_id, retentions=None):
"""
Create a tenant. Currently nothing can be set (to be fixed after the master
        version of Hawkular-Metrics has a fixed implementation).
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
"""
item = { 'id': tenant_id }
if retentions is not None:
item['retentions'] = retentions
self._post(self._get_tenants_url(), json.dumps(item, indent=2))
|
python
|
def create_tenant(self, tenant_id, retentions=None):
"""
Create a tenant. Currently nothing can be set (to be fixed after the master
        version of Hawkular-Metrics has a fixed implementation).
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
"""
item = { 'id': tenant_id }
if retentions is not None:
item['retentions'] = retentions
self._post(self._get_tenants_url(), json.dumps(item, indent=2))
|
[
"def",
"create_tenant",
"(",
"self",
",",
"tenant_id",
",",
"retentions",
"=",
"None",
")",
":",
"item",
"=",
"{",
"'id'",
":",
"tenant_id",
"}",
"if",
"retentions",
"is",
"not",
"None",
":",
"item",
"[",
"'retentions'",
"]",
"=",
"retentions",
"self",
".",
"_post",
"(",
"self",
".",
"_get_tenants_url",
"(",
")",
",",
"json",
".",
"dumps",
"(",
"item",
",",
"indent",
"=",
"2",
")",
")"
] |
Create a tenant. Currently nothing can be set (to be fixed after the master
version of Hawkular-Metrics has a fixed implementation).
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
|
[
"Create",
"a",
"tenant",
".",
"Currently",
"nothing",
"can",
"be",
"set",
"(",
"to",
"be",
"fixed",
"after",
"the",
"master",
"version",
"of",
"Hawkular",
"-",
"Metrics",
"has",
"fixed",
"implementation",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L327-L338
|
train
|
aiidateam/aiida-codtools
|
aiida_codtools/common/resources.py
|
get_default_options
|
def get_default_options(num_machines=1, max_wallclock_seconds=1800, withmpi=False):
"""
Return an instance of the options dictionary with the minimally required parameters
    for a JobCalculation and set to default values unless overridden
:param num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param withmpi: if True the calculation will be run in MPI mode
"""
return {
'resources': {
'num_machines': int(num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': withmpi,
}
|
python
|
def get_default_options(num_machines=1, max_wallclock_seconds=1800, withmpi=False):
"""
Return an instance of the options dictionary with the minimally required parameters
    for a JobCalculation and set to default values unless overridden
:param num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param withmpi: if True the calculation will be run in MPI mode
"""
return {
'resources': {
'num_machines': int(num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': withmpi,
}
|
[
"def",
"get_default_options",
"(",
"num_machines",
"=",
"1",
",",
"max_wallclock_seconds",
"=",
"1800",
",",
"withmpi",
"=",
"False",
")",
":",
"return",
"{",
"'resources'",
":",
"{",
"'num_machines'",
":",
"int",
"(",
"num_machines",
")",
"}",
",",
"'max_wallclock_seconds'",
":",
"int",
"(",
"max_wallclock_seconds",
")",
",",
"'withmpi'",
":",
"withmpi",
",",
"}"
] |
Return an instance of the options dictionary with the minimally required parameters
for a JobCalculation and set to default values unless overridden
:param num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param withmpi: if True the calculation will be run in MPI mode
|
[
"Return",
"an",
"instance",
"of",
"the",
"options",
"dictionary",
"with",
"the",
"minimally",
"required",
"parameters",
"for",
"a",
"JobCalculation",
"and",
"set",
"to",
"default",
"values",
"unless",
"overriden"
] |
da5e4259b7a2e86cf0cc3f997e11dd36d445fa94
|
https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/common/resources.py#L5-L20
|
train
|
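A sketch of overriding the defaults for a larger MPI job; the numbers are hypothetical:

```python
from aiida_codtools.common.resources import get_default_options

options = get_default_options(num_machines=2,
                              max_wallclock_seconds=3600,
                              withmpi=True)
# -> {'resources': {'num_machines': 2},
#     'max_wallclock_seconds': 3600, 'withmpi': True}
```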
aloetesting/aloe_webdriver
|
aloe_webdriver/screenshot_failed.py
|
take_screenshot
|
def take_screenshot(self):
"""Take a screenshot after a failed step."""
if not self.failed:
return
browser = getattr(world, 'browser', None)
if not browser:
return
try:
scenario_name = self.scenario.name
scenario_index = \
self.scenario.feature.scenarios.index(self.scenario) + 1
except AttributeError:
scenario_name = self.background.keyword
scenario_index = 0
if self.outline is None:
outline_index_str = ''
else:
outline_index = self.scenario.outlines.index(self.outline) + 1
outline_index_str = '_{}'.format(outline_index)
base_name = FORMAT.format(
feature_file=os.path.relpath(self.feature.filename),
scenario_index=scenario_index,
scenario_name=scenario_name,
outline_index=outline_index_str,
)
base_name = re.sub(r'\W', '_', base_name, flags=re.UNICODE)
base_name = os.path.join(DIRECTORY, base_name)
world.browser.save_screenshot('{}.png'.format(base_name))
with open('{}.html'.format(base_name), 'w') as page_source_file:
page_source_file.write(world.browser.page_source)
|
python
|
def take_screenshot(self):
"""Take a screenshot after a failed step."""
if not self.failed:
return
browser = getattr(world, 'browser', None)
if not browser:
return
try:
scenario_name = self.scenario.name
scenario_index = \
self.scenario.feature.scenarios.index(self.scenario) + 1
except AttributeError:
scenario_name = self.background.keyword
scenario_index = 0
if self.outline is None:
outline_index_str = ''
else:
outline_index = self.scenario.outlines.index(self.outline) + 1
outline_index_str = '_{}'.format(outline_index)
base_name = FORMAT.format(
feature_file=os.path.relpath(self.feature.filename),
scenario_index=scenario_index,
scenario_name=scenario_name,
outline_index=outline_index_str,
)
base_name = re.sub(r'\W', '_', base_name, flags=re.UNICODE)
base_name = os.path.join(DIRECTORY, base_name)
world.browser.save_screenshot('{}.png'.format(base_name))
with open('{}.html'.format(base_name), 'w') as page_source_file:
page_source_file.write(world.browser.page_source)
|
[
"def",
"take_screenshot",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"failed",
":",
"return",
"browser",
"=",
"getattr",
"(",
"world",
",",
"'browser'",
",",
"None",
")",
"if",
"not",
"browser",
":",
"return",
"try",
":",
"scenario_name",
"=",
"self",
".",
"scenario",
".",
"name",
"scenario_index",
"=",
"self",
".",
"scenario",
".",
"feature",
".",
"scenarios",
".",
"index",
"(",
"self",
".",
"scenario",
")",
"+",
"1",
"except",
"AttributeError",
":",
"scenario_name",
"=",
"self",
".",
"background",
".",
"keyword",
"scenario_index",
"=",
"0",
"if",
"self",
".",
"outline",
"is",
"None",
":",
"outline_index_str",
"=",
"''",
"else",
":",
"outline_index",
"=",
"self",
".",
"scenario",
".",
"outlines",
".",
"index",
"(",
"self",
".",
"outline",
")",
"+",
"1",
"outline_index_str",
"=",
"'_{}'",
".",
"format",
"(",
"outline_index",
")",
"base_name",
"=",
"FORMAT",
".",
"format",
"(",
"feature_file",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"self",
".",
"feature",
".",
"filename",
")",
",",
"scenario_index",
"=",
"scenario_index",
",",
"scenario_name",
"=",
"scenario_name",
",",
"outline_index",
"=",
"outline_index_str",
",",
")",
"base_name",
"=",
"re",
".",
"sub",
"(",
"r'\\W'",
",",
"'_'",
",",
"base_name",
",",
"flags",
"=",
"re",
".",
"UNICODE",
")",
"base_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DIRECTORY",
",",
"base_name",
")",
"world",
".",
"browser",
".",
"save_screenshot",
"(",
"'{}.png'",
".",
"format",
"(",
"base_name",
")",
")",
"with",
"open",
"(",
"'{}.html'",
".",
"format",
"(",
"base_name",
")",
",",
"'w'",
")",
"as",
"page_source_file",
":",
"page_source_file",
".",
"write",
"(",
"world",
".",
"browser",
".",
"page_source",
")"
] |
Take a screenshot after a failed step.
|
[
"Take",
"a",
"screenshot",
"after",
"a",
"failed",
"step",
"."
] |
65d847da4bdc63f9c015cb19d4efdee87df8ffad
|
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/screenshot_failed.py#L64-L100
|
train
|
SHDShim/pytheos
|
pytheos/eqn_kunc.py
|
kunc_p
|
def kunc_p(v, v0, k0, k0p, order=5):
"""
calculate Kunc EOS
see Dorogokupets 2015 for detail
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param order: order for the Kunc equation
:return: pressure in GPa
"""
return cal_p_kunc(v, [v0, k0, k0p], order=order,
uncertainties=isuncertainties([v, v0, k0, k0p]))
|
python
|
def kunc_p(v, v0, k0, k0p, order=5):
"""
calculate Kunc EOS
see Dorogokupets 2015 for detail
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param order: order for the Kunc equation
:return: pressure in GPa
"""
return cal_p_kunc(v, [v0, k0, k0p], order=order,
uncertainties=isuncertainties([v, v0, k0, k0p]))
|
[
"def",
"kunc_p",
"(",
"v",
",",
"v0",
",",
"k0",
",",
"k0p",
",",
"order",
"=",
"5",
")",
":",
"return",
"cal_p_kunc",
"(",
"v",
",",
"[",
"v0",
",",
"k0",
",",
"k0p",
"]",
",",
"order",
"=",
"order",
",",
"uncertainties",
"=",
"isuncertainties",
"(",
"[",
"v",
",",
"v0",
",",
"k0",
",",
"k0p",
"]",
")",
")"
] |
calculate Kunc EOS
see Dorogokupets 2015 for detail
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param order: order for the Kunc equation
:return: pressure in GPa
|
[
"calculate",
"Kunc",
"EOS",
"see",
"Dorogokupets",
"2015",
"for",
"detail"
] |
be079624405e92fbec60c5ead253eb5917e55237
|
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_kunc.py#L13-L26
|
train
|
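A sketch of a single pressure evaluation with the Kunc EOS; the MgO-like parameters are hypothetical (v and v0 in A^3, k0 in GPa, k0p dimensionless), and it assumes pytheos and its numpy/uncertainties dependencies are installed:

```python
from pytheos.eqn_kunc import kunc_p

p = kunc_p(v=70.0, v0=74.7, k0=160.0, k0p=4.0, order=5)
print(p)  # pressure in GPa at the compressed volume
```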
SHDShim/pytheos
|
pytheos/eqn_kunc.py
|
cal_p_kunc
|
def cal_p_kunc(v, k, order=5, uncertainties=True):
"""
calculate Kunc EOS,
see Dorogokupets2015 for functional form
:param v: unit-cell volume in A^3
:param k: [v0, k0, k0p]
:param order: order for the Kunc equation
:param uncertainties: use of uncertainties package
:return: pressure in GPa
:note: internal function
"""
v0 = k[0]
k0 = k[1]
k0p = k[2]
x = np.power(v / v0, 1. / 3.)
f1 = (1. - x) / (np.power(x, order))
if uncertainties:
f2 = unp.exp((1.5 * k0p - order + 0.5) * (1. - x))
else:
f2 = np.exp((1.5 * k0p - order + 0.5) * (1. - x))
p = 3. * k0 * f1 * f2
return p
|
python
|
def cal_p_kunc(v, k, order=5, uncertainties=True):
"""
calculate Kunc EOS,
see Dorogokupets2015 for functional form
:param v: unit-cell volume in A^3
:param k: [v0, k0, k0p]
:param order: order for the Kunc equation
:param uncertainties: use of uncertainties package
:return: pressure in GPa
:note: internal function
"""
v0 = k[0]
k0 = k[1]
k0p = k[2]
x = np.power(v / v0, 1. / 3.)
f1 = (1. - x) / (np.power(x, order))
if uncertainties:
f2 = unp.exp((1.5 * k0p - order + 0.5) * (1. - x))
else:
f2 = np.exp((1.5 * k0p - order + 0.5) * (1. - x))
p = 3. * k0 * f1 * f2
return p
|
[
"def",
"cal_p_kunc",
"(",
"v",
",",
"k",
",",
"order",
"=",
"5",
",",
"uncertainties",
"=",
"True",
")",
":",
"v0",
"=",
"k",
"[",
"0",
"]",
"k0",
"=",
"k",
"[",
"1",
"]",
"k0p",
"=",
"k",
"[",
"2",
"]",
"x",
"=",
"np",
".",
"power",
"(",
"v",
"/",
"v0",
",",
"1.",
"/",
"3.",
")",
"f1",
"=",
"(",
"1.",
"-",
"x",
")",
"/",
"(",
"np",
".",
"power",
"(",
"x",
",",
"order",
")",
")",
"if",
"uncertainties",
":",
"f2",
"=",
"unp",
".",
"exp",
"(",
"(",
"1.5",
"*",
"k0p",
"-",
"order",
"+",
"0.5",
")",
"*",
"(",
"1.",
"-",
"x",
")",
")",
"else",
":",
"f2",
"=",
"np",
".",
"exp",
"(",
"(",
"1.5",
"*",
"k0p",
"-",
"order",
"+",
"0.5",
")",
"*",
"(",
"1.",
"-",
"x",
")",
")",
"p",
"=",
"3.",
"*",
"k0",
"*",
"f1",
"*",
"f2",
"return",
"p"
] |
calculate Kunc EOS,
see Dorogokupets2015 for functional form
:param v: unit-cell volume in A^3
:param k: [v0, k0, k0p]
:param order: order for the Kunc equation
:param uncertainties: use of uncertainties package
:return: pressure in GPa
:note: internal function
|
[
"calculate",
"Kunc",
"EOS",
"see",
"Dorogokupets2015",
"for",
"functional",
"form"
] |
be079624405e92fbec60c5ead253eb5917e55237
|
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_kunc.py#L29-L51
|
train
|
totalgood/twip
|
twip/futil.py
|
find_files
|
def find_files(path='', ext='', level=None, typ=list, dirs=False, files=True, verbosity=0):
""" Recursively find all files in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str): File name extension. Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'bytes', 'created', 'modified', 'accessed', 'permissions' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
            name (str): File name only (everything after the last slash in the path)
size (int): File size in bytes
created (datetime): File creation timestamp from file system
modified (datetime): File modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
e.g.: 777 or 1755
Examples:
>>> 'util.py' in [d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> (d for d in find_files(os.path.dirname(__file__), ext='.py') if d['name'] == 'util.py').next()['size'] > 1000
True
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> all(d['type'] in ('file','dir','symlink->file','symlink->dir','mount-point->file','mount-point->dir','block-device',
'symlink->broken','pipe','special','socket','unknown') for d in find_files(level=1, files=True, dirs=True))
True
>>> os.path.join(os.path.dirname(__file__), '__init__.py') in find_files(
... os.path.dirname(__file__), ext='.py', level=0, typ=dict)
True
"""
gen = generate_files(path, ext=ext, level=level, dirs=dirs, files=files, verbosity=verbosity)
if isinstance(typ(), collections.Mapping):
return typ((ff['path'], ff) for ff in gen)
elif typ is not None:
return typ(gen)
else:
return gen
|
python
|
def find_files(path='', ext='', level=None, typ=list, dirs=False, files=True, verbosity=0):
""" Recursively find all files in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str): File name extension. Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'bytes', 'created', 'modified', 'accessed', 'permissions' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
            name (str): File name only (everything after the last slash in the path)
size (int): File size in bytes
created (datetime): File creation timestamp from file system
modified (datetime): File modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
e.g.: 777 or 1755
Examples:
>>> 'util.py' in [d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> (d for d in find_files(os.path.dirname(__file__), ext='.py') if d['name'] == 'util.py').next()['size'] > 1000
True
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> all(d['type'] in ('file','dir','symlink->file','symlink->dir','mount-point->file','mount-point->dir','block-device',
'symlink->broken','pipe','special','socket','unknown') for d in find_files(level=1, files=True, dirs=True))
True
>>> os.path.join(os.path.dirname(__file__), '__init__.py') in find_files(
... os.path.dirname(__file__), ext='.py', level=0, typ=dict)
True
"""
gen = generate_files(path, ext=ext, level=level, dirs=dirs, files=files, verbosity=verbosity)
if isinstance(typ(), collections.Mapping):
return typ((ff['path'], ff) for ff in gen)
elif typ is not None:
return typ(gen)
else:
return gen
|
[
"def",
"find_files",
"(",
"path",
"=",
"''",
",",
"ext",
"=",
"''",
",",
"level",
"=",
"None",
",",
"typ",
"=",
"list",
",",
"dirs",
"=",
"False",
",",
"files",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"gen",
"=",
"generate_files",
"(",
"path",
",",
"ext",
"=",
"ext",
",",
"level",
"=",
"level",
",",
"dirs",
"=",
"dirs",
",",
"files",
"=",
"files",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"isinstance",
"(",
"typ",
"(",
")",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"typ",
"(",
"(",
"ff",
"[",
"'path'",
"]",
",",
"ff",
")",
"for",
"ff",
"in",
"gen",
")",
"elif",
"typ",
"is",
"not",
"None",
":",
"return",
"typ",
"(",
"gen",
")",
"else",
":",
"return",
"gen"
] |
Recursively find all files in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str): File name extension. Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'size', 'created', 'modified', 'accessed', 'permissions', 'type' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
name (str): File name only (everything after the last slash in the path)
size (int): File size in bytes
created (datetime): File creation timestamp from file system
modified (datetime): File modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits, e.g.: 777 or 1755
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
Examples:
>>> 'util.py' in [d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> (d for d in find_files(os.path.dirname(__file__), ext='.py') if d['name'] == 'util.py').next()['size'] > 1000
True
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in find_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> all(d['type'] in ('file','dir','symlink->file','symlink->dir','mount-point->file','mount-point->dir','block-device',
'symlink->broken','pipe','special','socket','unknown') for d in find_files(level=1, files=True, dirs=True))
True
>>> os.path.join(os.path.dirname(__file__), '__init__.py') in find_files(
... os.path.dirname(__file__), ext='.py', level=0, typ=dict)
True
|
[
"Recursively",
"find",
"all",
"files",
"in",
"the",
"indicated",
"directory"
] |
5c0411d2acfbe5b421841072814c9152591c03f7
|
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/futil.py#L111-L163
|
train
|
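The find_files record above is easiest to read alongside a small usage sketch. The snippet below assumes the function is importable from twip's futil module (the import path is taken from the record's URL) and only exercises parameters documented in the docstring.

from twip.futil import find_files

# Non-recursive listing (level=0) of .py files under the current directory.
for info in find_files(path='.', ext='.py', level=0):
    print(info['path'], info['size'], info['type'])

# With a mapping type, keys are the full paths and values are the info dicts.
by_path = find_files(path='.', ext='.py', typ=dict)

# Directory entries only, equivalent to `ls -d`.
dirs_only = find_files(path='.', dirs=True, files=False)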
erijo/tellive-py
|
tellive/livemessage.py
|
LiveMessageToken.serialize
|
def serialize(self):
"""Serialize the token and return it as bytes."""
if type(self.value) == int:
return "i{:X}s".format(self.value).encode('ascii')
if type(self.value) == str:
value = self.value.encode('utf-8')
return "{:X}:".format(len(value)).encode('ascii') + value
if type(self.value) == bytes:
value = base64.standard_b64encode(self.value)
return "u{:X}:".format(len(value)).encode('ascii') + value
if type(self.value) == list:
items = [LiveMessageToken(m).serialize() for m in self.value]
return b'l' + b''.join(items) + b's'
if type(self.value) == dict:
items = []
for key, value in self.value.items():
items.append(LiveMessageToken(str(key)).serialize())
items.append(LiveMessageToken(value).serialize())
return b'h' + b''.join(items) + b's'
raise RuntimeError("Unknown type %s" % type(self.value))
|
python
|
def serialize(self):
"""Serialize the token and return it as bytes."""
if type(self.value) == int:
return "i{:X}s".format(self.value).encode('ascii')
if type(self.value) == str:
value = self.value.encode('utf-8')
return "{:X}:".format(len(value)).encode('ascii') + value
if type(self.value) == bytes:
value = base64.standard_b64encode(self.value)
return "u{:X}:".format(len(value)).encode('ascii') + value
if type(self.value) == list:
items = [LiveMessageToken(m).serialize() for m in self.value]
return b'l' + b''.join(items) + b's'
if type(self.value) == dict:
items = []
for key, value in self.value.items():
items.append(LiveMessageToken(str(key)).serialize())
items.append(LiveMessageToken(value).serialize())
return b'h' + b''.join(items) + b's'
raise RuntimeError("Unknown type %s" % type(self.value))
|
[
"def",
"serialize",
"(",
"self",
")",
":",
"if",
"type",
"(",
"self",
".",
"value",
")",
"==",
"int",
":",
"return",
"\"i{:X}s\"",
".",
"format",
"(",
"self",
".",
"value",
")",
".",
"encode",
"(",
"'ascii'",
")",
"if",
"type",
"(",
"self",
".",
"value",
")",
"==",
"str",
":",
"value",
"=",
"self",
".",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"\"{:X}:\"",
".",
"format",
"(",
"len",
"(",
"value",
")",
")",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"value",
"if",
"type",
"(",
"self",
".",
"value",
")",
"==",
"bytes",
":",
"value",
"=",
"base64",
".",
"standard_b64encode",
"(",
"self",
".",
"value",
")",
"return",
"\"u{:X}:\"",
".",
"format",
"(",
"len",
"(",
"value",
")",
")",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"value",
"if",
"type",
"(",
"self",
".",
"value",
")",
"==",
"list",
":",
"items",
"=",
"[",
"LiveMessageToken",
"(",
"m",
")",
".",
"serialize",
"(",
")",
"for",
"m",
"in",
"self",
".",
"value",
"]",
"return",
"b'l'",
"+",
"b''",
".",
"join",
"(",
"items",
")",
"+",
"b's'",
"if",
"type",
"(",
"self",
".",
"value",
")",
"==",
"dict",
":",
"items",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"value",
".",
"items",
"(",
")",
":",
"items",
".",
"append",
"(",
"LiveMessageToken",
"(",
"str",
"(",
"key",
")",
")",
".",
"serialize",
"(",
")",
")",
"items",
".",
"append",
"(",
"LiveMessageToken",
"(",
"value",
")",
".",
"serialize",
"(",
")",
")",
"return",
"b'h'",
"+",
"b''",
".",
"join",
"(",
"items",
")",
"+",
"b's'",
"raise",
"RuntimeError",
"(",
"\"Unknown type %s\"",
"%",
"type",
"(",
"self",
".",
"value",
")",
")"
] |
Serialize the token and return it as bytes.
|
[
"Serialize",
"the",
"token",
"and",
"return",
"it",
"as",
"bytes",
"."
] |
a84ebb1eb29ee4c69a085e55e523ac5fff0087fc
|
https://github.com/erijo/tellive-py/blob/a84ebb1eb29ee4c69a085e55e523ac5fff0087fc/tellive/livemessage.py#L31-L55
|
train
|
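A short sketch of the wire format serialize() produces may help; the expected byte strings below follow directly from the branches above (hex-encoded ints wrapped in i...s, hex length prefixes for strings, l...s and h...s envelopes for lists and dicts), assuming tellive is installed.

from tellive.livemessage import LiveMessageToken

assert LiveMessageToken(26).serialize() == b'i1As'            # int: hex digits in i...s
assert LiveMessageToken('hi').serialize() == b'2:hi'          # str: hex length prefix
assert LiveMessageToken(['a', 1]).serialize() == b'l1:ai1ss'  # list envelope l...s
assert LiveMessageToken({'k': 1}).serialize() == b'h1:ki1ss'  # dict envelope h...s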
ChrisBeaumont/smother
|
smother/control.py
|
Smother.write
|
def write(self, file_or_path, append=False, timeout=10):
"""
Write Smother results to a file.
Parameters
----------
file_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
"""
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh)
|
python
|
def write(self, file_or_path, append=False, timeout=10):
"""
Write Smother results to a file.
Parameters
----------
file_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
"""
if isinstance(file_or_path, six.string_types):
if self.coverage:
file_or_path = get_smother_filename(
file_or_path, self.coverage.config.parallel)
outfile = Lock(
file_or_path, mode='a+',
timeout=timeout,
fail_when_locked=False
)
else:
outfile = noclose(file_or_path)
with outfile as fh:
if append:
fh.seek(0)
try:
other = Smother.load(fh)
except ValueError: # no smother data
pass
else:
self |= other
fh.seek(0)
fh.truncate() # required to overwrite data in a+ mode
json.dump(self.data, fh)
|
[
"def",
"write",
"(",
"self",
",",
"file_or_path",
",",
"append",
"=",
"False",
",",
"timeout",
"=",
"10",
")",
":",
"if",
"isinstance",
"(",
"file_or_path",
",",
"six",
".",
"string_types",
")",
":",
"if",
"self",
".",
"coverage",
":",
"file_or_path",
"=",
"get_smother_filename",
"(",
"file_or_path",
",",
"self",
".",
"coverage",
".",
"config",
".",
"parallel",
")",
"outfile",
"=",
"Lock",
"(",
"file_or_path",
",",
"mode",
"=",
"'a+'",
",",
"timeout",
"=",
"timeout",
",",
"fail_when_locked",
"=",
"False",
")",
"else",
":",
"outfile",
"=",
"noclose",
"(",
"file_or_path",
")",
"with",
"outfile",
"as",
"fh",
":",
"if",
"append",
":",
"fh",
".",
"seek",
"(",
"0",
")",
"try",
":",
"other",
"=",
"Smother",
".",
"load",
"(",
"fh",
")",
"except",
"ValueError",
":",
"# no smother data",
"pass",
"else",
":",
"self",
"|=",
"other",
"fh",
".",
"seek",
"(",
"0",
")",
"fh",
".",
"truncate",
"(",
")",
"# required to overwrite data in a+ mode",
"json",
".",
"dump",
"(",
"self",
".",
"data",
",",
"fh",
")"
] |
Write Smother results to a file.
Parameters
----------
file_or_path : str
Path to write report to
append : bool
If True, read an existing smother report from `outpath`
and combine it with this file before writing.
timeout : int
Time in seconds to wait to acquire a file lock, before
raising an error.
Note
----
Append mode is atomic when file_or_path is a path,
and can be safely run in a multithreaded or
multiprocess test environment.
When using `parallel_mode`, file_or_path is given a unique
suffix based on the machine name and process id.
|
[
"Write",
"Smother",
"results",
"to",
"a",
"file",
"."
] |
65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
|
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L87-L137
|
train
|
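A hedged usage sketch for write() follows; constructing Smother() with no coverage object is an assumption made for brevity here, not the documented entry point.

import io
from smother.control import Smother

sm = Smother()                       # assumed no-arg construction
buf = io.StringIO()
sm.write(buf)                        # open, seekable handles are used as-is

# With a path, write() takes an advisory file lock first, which is what
# makes append mode safe across parallel test processes.
sm.write('.smother', append=True, timeout=30)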
ChrisBeaumont/smother
|
smother/control.py
|
Smother.query_context
|
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result)
|
python
|
def query_context(self, regions, file_factory=PythonFile):
"""
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
"""
result = set()
for region in regions:
try:
pf = file_factory(region.filename)
except InvalidPythonFile:
continue
# region and/or coverage report may use paths
# relative to this directory. Ensure we find a match
# if they use different conventions.
paths = {
os.path.abspath(region.filename),
os.path.relpath(region.filename)
}
for test_context, hits in six.iteritems(self.data):
if test_context in result:
continue
for path in paths:
if region.intersects(pf, hits.get(path, [])):
result.add(test_context)
return QueryResult(result)
|
[
"def",
"query_context",
"(",
"self",
",",
"regions",
",",
"file_factory",
"=",
"PythonFile",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"region",
"in",
"regions",
":",
"try",
":",
"pf",
"=",
"file_factory",
"(",
"region",
".",
"filename",
")",
"except",
"InvalidPythonFile",
":",
"continue",
"# region and/or coverage report may use paths",
"# relative to this directory. Ensure we find a match",
"# if they use different conventions.",
"paths",
"=",
"{",
"os",
".",
"path",
".",
"abspath",
"(",
"region",
".",
"filename",
")",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"region",
".",
"filename",
")",
"}",
"for",
"test_context",
",",
"hits",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"data",
")",
":",
"if",
"test_context",
"in",
"result",
":",
"continue",
"for",
"path",
"in",
"paths",
":",
"if",
"region",
".",
"intersects",
"(",
"pf",
",",
"hits",
".",
"get",
"(",
"path",
",",
"[",
"]",
")",
")",
":",
"result",
".",
"add",
"(",
"test_context",
")",
"return",
"QueryResult",
"(",
"result",
")"
] |
Return which set of test contexts intersect a set of code regions.
Parameters
----------
regions: A sequence of Intervals
file_factory: Callable (optional, default PythonFile)
A callable that takes a filename and
returns a PythonFile object.
Returns
-------
A QueryResult
|
[
"Return",
"which",
"set",
"of",
"test",
"contexts",
"intersect",
"a",
"set",
"of",
"code",
"regions",
"."
] |
65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb
|
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/control.py#L174-L214
|
train
|
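query_context() is the read side of the report written above. The sketch below assumes smother ships a ContextInterval region type and that Smother.load accepts a path; both are inferences from the surrounding code, so treat the import paths as approximate.

from smother.control import Smother
from smother.interval import ContextInterval   # assumed module layout

sm = Smother.load('.smother')
regions = [ContextInterval('mypkg/core.py', 'mypkg.core.frobnicate')]
result = sm.query_context(regions)             # QueryResult of matching test contexts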
Capitains/MyCapytain
|
MyCapytain/common/reference/_base.py
|
BaseCitationSet.add_child
|
def add_child(self, child):
""" Adds a child to the CitationSet
:param child: Child citation to add
:return:
"""
if isinstance(child, BaseCitation):
self._children.append(child)
|
python
|
def add_child(self, child):
""" Adds a child to the CitationSet
:param child: Child citation to add
:return:
"""
if isinstance(child, BaseCitation):
self._children.append(child)
|
[
"def",
"add_child",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"BaseCitation",
")",
":",
"self",
".",
"_children",
".",
"append",
"(",
"child",
")"
] |
Adds a child to the CitationSet
:param child: Child citation to add
:return:
|
[
"Adds",
"a",
"child",
"to",
"the",
"CitationSet"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_base.py#L57-L64
|
train
|
Capitains/MyCapytain
|
MyCapytain/common/reference/_base.py
|
BaseCitation.depth
|
def depth(self) -> int:
""" Depth of the citation scheme
.. example:: If we have a Book, Poem, Line system, and the citation we are looking at is Poem, depth is 1
:rtype: int
:return: Depth of the citation scheme
"""
if len(self.children):
return 1 + max([child.depth for child in self.children])
else:
return 1
|
python
|
def depth(self) -> int:
""" Depth of the citation scheme
.. example:: If we have a Book, Poem, Line system, and the citation we are looking at is Poem, depth is 1
:rtype: int
:return: Depth of the citation scheme
"""
if len(self.children):
return 1 + max([child.depth for child in self.children])
else:
return 1
|
[
"def",
"depth",
"(",
"self",
")",
"->",
"int",
":",
"if",
"len",
"(",
"self",
".",
"children",
")",
":",
"return",
"1",
"+",
"max",
"(",
"[",
"child",
".",
"depth",
"for",
"child",
"in",
"self",
".",
"children",
"]",
")",
"else",
":",
"return",
"1"
] |
Depth of the citation scheme
.. example:: If we have a Book, Poem, Line system, and the citation we are looking at is Poem, depth is 1
:rtype: int
:return: Depth of the citation scheme
|
[
"Depth",
"of",
"the",
"citation",
"scheme"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_base.py#L298-L310
|
train
|
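The depth property is a one-plus-max recursion, which is easier to see on a toy tree. Note that by this definition a Book > Poem > Line scheme gives the Book node depth 3 and the Poem node depth 2, which does not match the inline ".. example::" in the docstring; the stand-in class below follows the code, not the comment.

class Node:                      # stand-in, not the MyCapytain class
    def __init__(self, children=()):
        self.children = list(children)

    @property
    def depth(self):
        if len(self.children):
            return 1 + max(child.depth for child in self.children)
        return 1

line = Node()
poem = Node([line])
book = Node([poem])
assert (book.depth, poem.depth, line.depth) == (3, 2, 1)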
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
PrototypeCtsCollection.set_link
|
def set_link(self, prop, value):
""" Set given link in CTS Namespace
.. example::
collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")
:param prop: Property to set (Without namespace)
:param value: Value to set for given property
"""
# https://rdflib.readthedocs.io/en/stable/
# URIRef == identifiers (urn, http, URI in general)
# Literal == String or Number (can have a language)
# BNode == Anonymous nodes (So no specific identifier)
# eg. BNode : Edition(MartialEpigrams:URIRef) ---has_metadata--> Metadata(BNode)
if not isinstance(value, URIRef):
value = URIRef(value)
self.metadata.add(prop, value)
|
python
|
def set_link(self, prop, value):
""" Set given link in CTS Namespace
.. example::
collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")
:param prop: Property to set (Without namespace)
:param value: Value to set for given property
"""
# https://rdflib.readthedocs.io/en/stable/
# URIRef == identifiers (urn, http, URI in general)
# Literal == String or Number (can have a language)
# BNode == Anonymous nodes (So no specific identifier)
# eg. BNode : Edition(MartialEpigrams:URIRef) ---has_metadata--> Metadata(BNode)
if not isinstance(value, URIRef):
value = URIRef(value)
self.metadata.add(prop, value)
|
[
"def",
"set_link",
"(",
"self",
",",
"prop",
",",
"value",
")",
":",
"# https://rdflib.readthedocs.io/en/stable/",
"# URIRef == identifiers (urn, http, URI in general)",
"# Literal == String or Number (can have a language)",
"# BNode == Anonymous nodes (So no specific identifier)",
"#\t\teg. BNode : Edition(MartialEpigrams:URIRef) ---has_metadata--> Metadata(BNode)",
"if",
"not",
"isinstance",
"(",
"value",
",",
"URIRef",
")",
":",
"value",
"=",
"URIRef",
"(",
"value",
")",
"self",
".",
"metadata",
".",
"add",
"(",
"prop",
",",
"value",
")"
] |
Set given link in CTS Namespace
.. example::
collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")
:param prop: Property to set (Without namespace)
:param value: Value to set for given property
|
[
"Set",
"given",
"link",
"in",
"CTS",
"Namespace"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L118-L135
|
train
|
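The comment block in set_link leans on rdflib's node types; the snippet below shows just the coercion step with the real rdflib API, while the final call against a collection (commented out) mirrors the docstring example.

from rdflib import URIRef

value = "urn:cts:latinLit:phi1294.phi002"
if not isinstance(value, URIRef):
    value = URIRef(value)        # plain strings become URIRef identifiers
assert isinstance(value, URIRef)

# Against an actual collection object, per the docstring:
# collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")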
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsTextMetadata.editions
|
def editions(self):
""" Get all editions of the texts
:return: List of editions
:rtype: [CtsTextMetadata]
"""
return [
item
for urn, item in self.parent.children.items()
if isinstance(item, CtsEditionMetadata)
]
|
python
|
def editions(self):
""" Get all editions of the texts
:return: List of editions
:rtype: [CtsTextMetadata]
"""
return [
item
for urn, item in self.parent.children.items()
if isinstance(item, CtsEditionMetadata)
]
|
[
"def",
"editions",
"(",
"self",
")",
":",
"return",
"[",
"item",
"for",
"urn",
",",
"item",
"in",
"self",
".",
"parent",
".",
"children",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"item",
",",
"CtsEditionMetadata",
")",
"]"
] |
Get all editions of the texts
:return: List of editions
:rtype: [CtsTextMetadata]
|
[
"Get",
"all",
"editions",
"of",
"the",
"texts"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L263-L273
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsTextMetadata.get_description
|
def get_description(self, lang=None):
""" Get the DC description of the object
:param lang: Lang to retrieve
:return: Description string representation
:rtype: Literal
"""
return self.metadata.get_single(key=RDF_NAMESPACES.CTS.description, lang=lang)
|
python
|
def get_description(self, lang=None):
""" Get the DC description of the object
:param lang: Lang to retrieve
:return: Description string representation
:rtype: Literal
"""
return self.metadata.get_single(key=RDF_NAMESPACES.CTS.description, lang=lang)
|
[
"def",
"get_description",
"(",
"self",
",",
"lang",
"=",
"None",
")",
":",
"return",
"self",
".",
"metadata",
".",
"get_single",
"(",
"key",
"=",
"RDF_NAMESPACES",
".",
"CTS",
".",
"description",
",",
"lang",
"=",
"lang",
")"
] |
Get the DC description of the object
:param lang: Lang to retrieve
:return: Description string representation
:rtype: Literal
|
[
"Get",
"the",
"DC",
"description",
"of",
"the",
"object"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L360-L367
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsWorkMetadata.lang
|
def lang(self):
""" Languages this text is in
:return: List of available languages
"""
return str(self.graph.value(self.asNode(), DC.language))
|
python
|
def lang(self):
""" Languages this text is in
:return: List of available languages
"""
return str(self.graph.value(self.asNode(), DC.language))
|
[
"def",
"lang",
"(",
"self",
")",
":",
"return",
"str",
"(",
"self",
".",
"graph",
".",
"value",
"(",
"self",
".",
"asNode",
"(",
")",
",",
"DC",
".",
"language",
")",
")"
] |
Languages this text is in
:return: List of available languages
|
[
"Languages",
"this",
"text",
"is",
"in"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L461-L466
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsWorkMetadata.lang
|
def lang(self, lang):
""" Language this text is available in
:param lang: Language to add
:type lang: str
"""
self.graph.set((self.asNode(), DC.language, Literal(lang)))
|
python
|
def lang(self, lang):
""" Language this text is available in
:param lang: Language to add
:type lang: str
"""
self.graph.set((self.asNode(), DC.language, Literal(lang)))
|
[
"def",
"lang",
"(",
"self",
",",
"lang",
")",
":",
"self",
".",
"graph",
".",
"set",
"(",
"(",
"self",
".",
"asNode",
"(",
")",
",",
"DC",
".",
"language",
",",
"Literal",
"(",
"lang",
")",
")",
")"
] |
Language this text is available in
:param lang: Language to add
:type lang: str
|
[
"Language",
"this",
"text",
"is",
"available",
"in"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L469-L475
|
train
|
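The lang getter/setter pair above is a thin wrapper over rdflib; note the getter returns a single string via graph.value() even though its docstring promises a list. The self-contained sketch below reproduces both bodies on a throwaway graph.

from rdflib import BNode, Graph, Literal
from rdflib.namespace import DC

graph, node = Graph(), BNode()
graph.set((node, DC.language, Literal("lat")))   # setter body, in essence
graph.set((node, DC.language, Literal("grc")))   # set() replaces, it does not append
print(str(graph.value(node, DC.language)))       # 'grc' -- getter body, a str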
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsWorkMetadata.update
|
def update(self, other):
""" Merge two XmlCtsWorkMetadata Objects.
- Original (left Object) keeps its parent.
- Added document overwrites the text if it already exists
:param other: XmlCtsWorkMetadata object
:type other: CtsWorkMetadata
:return: XmlCtsWorkMetadata Object
:rtype: XmlCtsWorkMetadata
"""
if not isinstance(other, CtsWorkMetadata):
raise TypeError("Cannot add %s to CtsWorkMetadata" % type(other))
elif self.urn != other.urn:
raise InvalidURN("Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s " % (self.urn, other.urn))
for urn, text in other.children.items():
self.texts[urn] = text
self.texts[urn].parent = self
self.texts[urn].resource = None
return self
|
python
|
def update(self, other):
""" Merge two XmlCtsWorkMetadata Objects.
- Original (left Object) keeps its parent.
- Added document overwrites the text if it already exists
:param other: XmlCtsWorkMetadata object
:type other: CtsWorkMetadata
:return: XmlCtsWorkMetadata Object
:rtype: XmlCtsWorkMetadata
"""
if not isinstance(other, CtsWorkMetadata):
raise TypeError("Cannot add %s to CtsWorkMetadata" % type(other))
elif self.urn != other.urn:
raise InvalidURN("Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s " % (self.urn, other.urn))
for urn, text in other.children.items():
self.texts[urn] = text
self.texts[urn].parent = self
self.texts[urn].resource = None
return self
|
[
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"CtsWorkMetadata",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot add %s to CtsWorkMetadata\"",
"%",
"type",
"(",
"other",
")",
")",
"elif",
"self",
".",
"urn",
"!=",
"other",
".",
"urn",
":",
"raise",
"InvalidURN",
"(",
"\"Cannot add CtsWorkMetadata %s to CtsWorkMetadata %s \"",
"%",
"(",
"self",
".",
"urn",
",",
"other",
".",
"urn",
")",
")",
"for",
"urn",
",",
"text",
"in",
"other",
".",
"children",
".",
"items",
"(",
")",
":",
"self",
".",
"texts",
"[",
"urn",
"]",
"=",
"text",
"self",
".",
"texts",
"[",
"urn",
"]",
".",
"parent",
"=",
"self",
"self",
".",
"texts",
"[",
"urn",
"]",
".",
"resource",
"=",
"None",
"return",
"self"
] |
Merge two XmlCtsWorkMetadata Objects.
- Original (left Object) keeps its parent.
- Added document overwrites the text if it already exists
:param other: XmlCtsWorkMetadata object
:type other: CtsWorkMetadata
:return: XmlCtsWorkMetadata Object
:rtype: XmlCtsWorkMetadata
|
[
"Merge",
"two",
"XmlCtsWorkMetadata",
"Objects",
"."
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L477-L498
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsWorkMetadata.get_translation_in
|
def get_translation_in(self, key=None):
""" Find a translation with given language
:param key: Language to find
:type key: text_type
:rtype: [CtsTextMetadata]
:returns: List of available translations
"""
if key is not None:
return [
item
for item in self.texts.values()
if isinstance(item, CtsTranslationMetadata) and item.lang == key
]
else:
return [
item
for item in self.texts.values()
if isinstance(item, CtsTranslationMetadata)
]
|
python
|
def get_translation_in(self, key=None):
""" Find a translation with given language
:param key: Language to find
:type key: text_type
:rtype: [CtsTextMetadata]
:returns: List of available translations
"""
if key is not None:
return [
item
for item in self.texts.values()
if isinstance(item, CtsTranslationMetadata) and item.lang == key
]
else:
return [
item
for item in self.texts.values()
if isinstance(item, CtsTranslationMetadata)
]
|
[
"def",
"get_translation_in",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"not",
"None",
":",
"return",
"[",
"item",
"for",
"item",
"in",
"self",
".",
"texts",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"item",
",",
"CtsTranslationMetadata",
")",
"and",
"item",
".",
"lang",
"==",
"key",
"]",
"else",
":",
"return",
"[",
"item",
"for",
"item",
"in",
"self",
".",
"texts",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"item",
",",
"CtsTranslationMetadata",
")",
"]"
] |
Find a translation with given language
:param key: Language to find
:type key: text_type
:rtype: [CtsTextMetadata]
:returns: List of available translations
|
[
"Find",
"a",
"translation",
"with",
"given",
"language"
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L500-L519
|
train
|
Capitains/MyCapytain
|
MyCapytain/resources/prototypes/cts/inventory.py
|
CtsTextgroupMetadata.update
|
def update(self, other):
""" Merge two Textgroup Objects.
- Original (left Object) keeps its parent.
- Added document merges with the work if it already exists
:param other: Textgroup object
:type other: CtsTextgroupMetadata
:return: Textgroup Object
:rtype: CtsTextgroupMetadata
"""
if not isinstance(other, CtsTextgroupMetadata):
raise TypeError("Cannot add %s to CtsTextgroupMetadata" % type(other))
elif str(self.urn) != str(other.urn):
raise InvalidURN("Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % (self.urn, other.urn))
for urn, work in other.works.items():
if urn in self.works:
self.works[urn].update(deepcopy(work))
else:
self.works[urn] = deepcopy(work)
self.works[urn].parent = self
self.works[urn].resource = None
return self
|
python
|
def update(self, other):
""" Merge two Textgroup Objects.
- Original (left Object) keeps its parent.
- Added document merges with the work if it already exists
:param other: Textgroup object
:type other: CtsTextgroupMetadata
:return: Textgroup Object
:rtype: CtsTextgroupMetadata
"""
if not isinstance(other, CtsTextgroupMetadata):
raise TypeError("Cannot add %s to CtsTextgroupMetadata" % type(other))
elif str(self.urn) != str(other.urn):
raise InvalidURN("Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % (self.urn, other.urn))
for urn, work in other.works.items():
if urn in self.works:
self.works[urn].update(deepcopy(work))
else:
self.works[urn] = deepcopy(work)
self.works[urn].parent = self
self.works[urn].resource = None
return self
|
[
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"CtsTextgroupMetadata",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot add %s to CtsTextgroupMetadata\"",
"%",
"type",
"(",
"other",
")",
")",
"elif",
"str",
"(",
"self",
".",
"urn",
")",
"!=",
"str",
"(",
"other",
".",
"urn",
")",
":",
"raise",
"InvalidURN",
"(",
"\"Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s \"",
"%",
"(",
"self",
".",
"urn",
",",
"other",
".",
"urn",
")",
")",
"for",
"urn",
",",
"work",
"in",
"other",
".",
"works",
".",
"items",
"(",
")",
":",
"if",
"urn",
"in",
"self",
".",
"works",
":",
"self",
".",
"works",
"[",
"urn",
"]",
".",
"update",
"(",
"deepcopy",
"(",
"work",
")",
")",
"else",
":",
"self",
".",
"works",
"[",
"urn",
"]",
"=",
"deepcopy",
"(",
"work",
")",
"self",
".",
"works",
"[",
"urn",
"]",
".",
"parent",
"=",
"self",
"self",
".",
"works",
"[",
"urn",
"]",
".",
"resource",
"=",
"None",
"return",
"self"
] |
Merge two Textgroup Objects.
- Original (left Object) keeps its parent.
- Added document merges with the work if it already exists
:param other: Textgroup object
:type other: CtsTextgroupMetadata
:return: Textgroup Object
:rtype: CtsTextgroupMetadata
|
[
"Merge",
"two",
"Textgroup",
"Objects",
"."
] |
b11bbf6b6ae141fc02be70471e3fbf6907be6593
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L581-L605
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.get
|
def get(self, tags=[], trigger_ids=[]):
"""
Get triggers with optional filtering. Querying without parameters returns all the trigger definitions.
:param tags: Fetch triggers with matching tags only. Use * to match all values.
:param trigger_ids: List of triggerIds to fetch
"""
params = {}
if len(tags) > 0:
params['tags'] = ','.join(tags)
if len(trigger_ids) > 0:
params['triggerIds'] = ','.join(trigger_ids)
url = self._service_url('triggers', params=params)
triggers_dict = self._get(url)
return Trigger.list_to_object_list(triggers_dict)
|
python
|
def get(self, tags=[], trigger_ids=[]):
"""
Get triggers with optional filtering. Querying without parameters returns all the trigger definitions.
:param tags: Fetch triggers with matching tags only. Use * to match all values.
:param trigger_ids: List of triggerIds to fetch
"""
params = {}
if len(tags) > 0:
params['tags'] = ','.join(tags)
if len(trigger_ids) > 0:
params['triggerIds'] = ','.join(trigger_ids)
url = self._service_url('triggers', params=params)
triggers_dict = self._get(url)
return Trigger.list_to_object_list(triggers_dict)
|
[
"def",
"get",
"(",
"self",
",",
"tags",
"=",
"[",
"]",
",",
"trigger_ids",
"=",
"[",
"]",
")",
":",
"params",
"=",
"{",
"}",
"if",
"len",
"(",
"tags",
")",
">",
"0",
":",
"params",
"[",
"'tags'",
"]",
"=",
"','",
".",
"join",
"(",
"tags",
")",
"if",
"len",
"(",
"trigger_ids",
")",
">",
"0",
":",
"params",
"[",
"'triggerIds'",
"]",
"=",
"','",
".",
"join",
"(",
"trigger_ids",
")",
"url",
"=",
"self",
".",
"_service_url",
"(",
"'triggers'",
",",
"params",
"=",
"params",
")",
"triggers_dict",
"=",
"self",
".",
"_get",
"(",
"url",
")",
"return",
"Trigger",
".",
"list_to_object_list",
"(",
"triggers_dict",
")"
] |
Get triggers with optional filtering. Querying without parameters returns all the trigger definitions.
:param tags: Fetch triggers with matching tags only. Use * to match all values.
:param trigger_ids: List of triggerIds to fetch
|
[
"Get",
"triggers",
"with",
"optional",
"filtering",
".",
"Querying",
"without",
"parameters",
"returns",
"all",
"the",
"trigger",
"definitions",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L132-L148
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.create
|
def create(self, trigger):
"""
Create a new trigger.
:param trigger: FullTrigger or Trigger class to be created
:return: The created trigger
"""
data = self._serialize_object(trigger)
if isinstance(trigger, FullTrigger):
returned_dict = self._post(self._service_url(['triggers', 'trigger']), data)
return FullTrigger(returned_dict)
else:
returned_dict = self._post(self._service_url('triggers'), data)
return Trigger(returned_dict)
|
python
|
def create(self, trigger):
"""
Create a new trigger.
:param trigger: FullTrigger or Trigger class to be created
:return: The created trigger
"""
data = self._serialize_object(trigger)
if isinstance(trigger, FullTrigger):
returned_dict = self._post(self._service_url(['triggers', 'trigger']), data)
return FullTrigger(returned_dict)
else:
returned_dict = self._post(self._service_url('triggers'), data)
return Trigger(returned_dict)
|
[
"def",
"create",
"(",
"self",
",",
"trigger",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"trigger",
")",
"if",
"isinstance",
"(",
"trigger",
",",
"FullTrigger",
")",
":",
"returned_dict",
"=",
"self",
".",
"_post",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'trigger'",
"]",
")",
",",
"data",
")",
"return",
"FullTrigger",
"(",
"returned_dict",
")",
"else",
":",
"returned_dict",
"=",
"self",
".",
"_post",
"(",
"self",
".",
"_service_url",
"(",
"'triggers'",
")",
",",
"data",
")",
"return",
"Trigger",
"(",
"returned_dict",
")"
] |
Create a new trigger.
:param trigger: FullTrigger or Trigger class to be created
:return: The created trigger
|
[
"Create",
"a",
"new",
"trigger",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L150-L163
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.update
|
def update(self, trigger_id, full_trigger):
"""
Update an existing full trigger.
:param full_trigger: FullTrigger with conditions, dampenings and triggers
:type full_trigger: FullTrigger
:return: Updated FullTrigger definition
"""
data = self._serialize_object(full_trigger)
rdict = self._put(self._service_url(['triggers', 'trigger', trigger_id]), data)
return FullTrigger(rdict)
|
python
|
def update(self, trigger_id, full_trigger):
"""
Update an existing full trigger.
:param full_trigger: FullTrigger with conditions, dampenings and triggers
:type full_trigger: FullTrigger
:return: Updated FullTrigger definition
"""
data = self._serialize_object(full_trigger)
rdict = self._put(self._service_url(['triggers', 'trigger', trigger_id]), data)
return FullTrigger(rdict)
|
[
"def",
"update",
"(",
"self",
",",
"trigger_id",
",",
"full_trigger",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"full_trigger",
")",
"rdict",
"=",
"self",
".",
"_put",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'trigger'",
",",
"trigger_id",
"]",
")",
",",
"data",
")",
"return",
"FullTrigger",
"(",
"rdict",
")"
] |
Update an existing full trigger.
:param full_trigger: FullTrigger with conditions, dampenings and triggers
:type full_trigger: FullTrigger
:return: Updated FullTrigger definition
|
[
"Update",
"an",
"existing",
"full",
"trigger",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L165-L175
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.create_group
|
def create_group(self, trigger):
"""
Create a new group trigger.
:param trigger: Group member trigger to be created
:return: The created group Trigger
"""
data = self._serialize_object(trigger)
return Trigger(self._post(self._service_url(['triggers', 'groups']), data))
|
python
|
def create_group(self, trigger):
"""
Create a new group trigger.
:param trigger: Group member trigger to be created
:return: The created group Trigger
"""
data = self._serialize_object(trigger)
return Trigger(self._post(self._service_url(['triggers', 'groups']), data))
|
[
"def",
"create_group",
"(",
"self",
",",
"trigger",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"trigger",
")",
"return",
"Trigger",
"(",
"self",
".",
"_post",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
"]",
")",
",",
"data",
")",
")"
] |
Create a new group trigger.
:param trigger: Group member trigger to be created
:return: The created group Trigger
|
[
"Create",
"a",
"new",
"group",
"trigger",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L202-L210
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.group_members
|
def group_members(self, group_id, include_orphans=False):
"""
Find all group member trigger definitions
:param group_id: group trigger id
:param include_orphans: If True, include orphan members
:return: list of associated group members as trigger objects
"""
params = {'includeOrphans': str(include_orphans).lower()}
url = self._service_url(['triggers', 'groups', group_id, 'members'], params=params)
return Trigger.list_to_object_list(self._get(url))
|
python
|
def group_members(self, group_id, include_orphans=False):
"""
Find all group member trigger definitions
:param group_id: group trigger id
:param include_orphans: If True, include orphan members
:return: list of associated group members as trigger objects
"""
params = {'includeOrphans': str(include_orphans).lower()}
url = self._service_url(['triggers', 'groups', group_id, 'members'], params=params)
return Trigger.list_to_object_list(self._get(url))
|
[
"def",
"group_members",
"(",
"self",
",",
"group_id",
",",
"include_orphans",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'includeOrphans'",
":",
"str",
"(",
"include_orphans",
")",
".",
"lower",
"(",
")",
"}",
"url",
"=",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"group_id",
",",
"'members'",
"]",
",",
"params",
"=",
"params",
")",
"return",
"Trigger",
".",
"list_to_object_list",
"(",
"self",
".",
"_get",
"(",
"url",
")",
")"
] |
Find all group member trigger definitions
:param group_id: group trigger id
:param include_orphans: If True, include orphan members
:return: list of associated group members as trigger objects
|
[
"Find",
"all",
"group",
"member",
"trigger",
"definitions"
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L212-L222
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.update_group
|
def update_group(self, group_id, trigger):
"""
Update an existing group trigger definition and its member definitions.
:param group_id: Group trigger id to be updated
:param trigger: Trigger object, the group trigger to be updated
"""
data = self._serialize_object(trigger)
self._put(self._service_url(['triggers', 'groups', group_id]), data, parse_json=False)
|
python
|
def update_group(self, group_id, trigger):
"""
Update an existing group trigger definition and its member definitions.
:param group_id: Group trigger id to be updated
:param trigger: Trigger object, the group trigger to be updated
"""
data = self._serialize_object(trigger)
self._put(self._service_url(['triggers', 'groups', group_id]), data, parse_json=False)
|
[
"def",
"update_group",
"(",
"self",
",",
"group_id",
",",
"trigger",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"trigger",
")",
"self",
".",
"_put",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"group_id",
"]",
")",
",",
"data",
",",
"parse_json",
"=",
"False",
")"
] |
Update an existing group trigger definition and its member definitions.
:param group_id: Group trigger id to be updated
:param trigger: Trigger object, the group trigger to be updated
|
[
"Update",
"an",
"existing",
"group",
"trigger",
"definition",
"and",
"its",
"member",
"definitions",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L224-L232
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.delete_group
|
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False):
"""
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
"""
params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()}
self._delete(self._service_url(['triggers', 'groups', group_id], params=params))
|
python
|
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False):
"""
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
"""
params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()}
self._delete(self._service_url(['triggers', 'groups', group_id], params=params))
|
[
"def",
"delete_group",
"(",
"self",
",",
"group_id",
",",
"keep_non_orphans",
"=",
"False",
",",
"keep_orphans",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'keepNonOrphans'",
":",
"str",
"(",
"keep_non_orphans",
")",
".",
"lower",
"(",
")",
",",
"'keepOrphans'",
":",
"str",
"(",
"keep_orphans",
")",
".",
"lower",
"(",
")",
"}",
"self",
".",
"_delete",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"group_id",
"]",
",",
"params",
"=",
"params",
")",
")"
] |
Delete a group trigger
:param group_id: ID of the group trigger to delete
:param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers
:param keep_orphans: if True converts the orphan member triggers to standard triggers
|
[
"Delete",
"a",
"group",
"trigger"
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L234-L243
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.create_group_member
|
def create_group_member(self, member):
"""
Create a new member trigger for a parent trigger.
:param member: Group member trigger to be created
:type member: GroupMemberInfo
:return: A member Trigger object
"""
data = self._serialize_object(member)
return Trigger(self._post(self._service_url(['triggers', 'groups', 'members']), data))
|
python
|
def create_group_member(self, member):
"""
Create a new member trigger for a parent trigger.
:param member: Group member trigger to be created
:type member: GroupMemberInfo
:return: A member Trigger object
"""
data = self._serialize_object(member)
return Trigger(self._post(self._service_url(['triggers', 'groups', 'members']), data))
|
[
"def",
"create_group_member",
"(",
"self",
",",
"member",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"member",
")",
"return",
"Trigger",
"(",
"self",
".",
"_post",
"(",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"'members'",
"]",
")",
",",
"data",
")",
")"
] |
Create a new member trigger for a parent trigger.
:param member: Group member trigger to be created
:type member: GroupMemberInfo
:return: A member Trigger object
|
[
"Create",
"a",
"new",
"member",
"trigger",
"for",
"a",
"parent",
"trigger",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L245-L254
|
train
|
hawkular/hawkular-client-python
|
hawkular/alerts/triggers.py
|
AlertsTriggerClient.set_group_conditions
|
def set_group_conditions(self, group_id, conditions, trigger_mode=None):
"""
Set the group conditions.
This replaces any existing group-level and member-level conditions, for all trigger modes.
:param group_id: Group to be updated
:param conditions: New conditions to replace old ones
:param trigger_mode: Optional TriggerMode used
:type conditions: GroupConditionsInfo
:type trigger_mode: TriggerMode
:return: The new Group conditions
"""
data = self._serialize_object(conditions)
if trigger_mode is not None:
url = self._service_url(['triggers', 'groups', group_id, 'conditions', trigger_mode])
else:
url = self._service_url(['triggers', 'groups', group_id, 'conditions'])
response = self._put(url, data)
return Condition.list_to_object_list(response)
|
python
|
def set_group_conditions(self, group_id, conditions, trigger_mode=None):
"""
Set the group conditions.
This replaces any existing group-level and member-level conditions, for all trigger modes.
:param group_id: Group to be updated
:param conditions: New conditions to replace old ones
:param trigger_mode: Optional TriggerMode used
:type conditions: GroupConditionsInfo
:type trigger_mode: TriggerMode
:return: The new Group conditions
"""
data = self._serialize_object(conditions)
if trigger_mode is not None:
url = self._service_url(['triggers', 'groups', group_id, 'conditions', trigger_mode])
else:
url = self._service_url(['triggers', 'groups', group_id, 'conditions'])
response = self._put(url, data)
return Condition.list_to_object_list(response)
|
[
"def",
"set_group_conditions",
"(",
"self",
",",
"group_id",
",",
"conditions",
",",
"trigger_mode",
"=",
"None",
")",
":",
"data",
"=",
"self",
".",
"_serialize_object",
"(",
"conditions",
")",
"if",
"trigger_mode",
"is",
"not",
"None",
":",
"url",
"=",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"group_id",
",",
"'conditions'",
",",
"trigger_mode",
"]",
")",
"else",
":",
"url",
"=",
"self",
".",
"_service_url",
"(",
"[",
"'triggers'",
",",
"'groups'",
",",
"group_id",
",",
"'conditions'",
"]",
")",
"response",
"=",
"self",
".",
"_put",
"(",
"url",
",",
"data",
")",
"return",
"Condition",
".",
"list_to_object_list",
"(",
"response",
")"
] |
Set the group conditions.
This replaces any existing group-level and member-level conditions, for all trigger modes.
:param group_id: Group to be updated
:param conditions: New conditions to replace old ones
:param trigger_mode: Optional TriggerMode used
:type conditions: GroupConditionsInfo
:type trigger_mode: TriggerMode
:return: The new Group conditions
|
[
"Set",
"the",
"group",
"conditions",
"."
] |
52371f9ebabbe310efee2a8ff8eb735ccc0654bb
|
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/alerts/triggers.py#L256-L277
|
train
|
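Taken together, the group-trigger calls above form a lifecycle; the sketch below strings them in order, again reusing the client from the earlier sketch. GroupMemberInfo and GroupConditionsInfo construction is deliberately elided (their fields are not shown in these records), so the ... placeholders must be filled in before this runs against a server.

from hawkular.alerts import Trigger   # assumed import path

group = client.triggers.create_group(Trigger({'id': 'group-cpu'}))   # parent definition
member_info = ...        # a GroupMemberInfo; fields not shown in these records
member = client.triggers.create_group_member(member_info)
members = client.triggers.group_members('group-cpu', include_orphans=True)
conditions = ...         # a GroupConditionsInfo; fields not shown here
client.triggers.set_group_conditions('group-cpu', conditions)        # replaces all modes
client.triggers.update_group('group-cpu', group)                     # pushes edits to members
client.triggers.delete_group('group-cpu', keep_non_orphans=True)     # members become standard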