repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
lreis2415/PyGeoC | pygeoc/postTauDEM.py | DinfUtil.check_orthogonal | def check_orthogonal(angle):
"""Check the given Dinf angle based on D8 flow direction encoding code by ArcGIS"""
flow_dir_taudem = -1
flow_dir = -1
if MathClass.floatequal(angle, FlowModelConst.e):
flow_dir_taudem = FlowModelConst.e
flow_dir = 1
elif MathClass.floatequal(angle, FlowModelConst.ne):
flow_dir_taudem = FlowModelConst.ne
flow_dir = 128
elif MathClass.floatequal(angle, FlowModelConst.n):
flow_dir_taudem = FlowModelConst.n
flow_dir = 64
elif MathClass.floatequal(angle, FlowModelConst.nw):
flow_dir_taudem = FlowModelConst.nw
flow_dir = 32
elif MathClass.floatequal(angle, FlowModelConst.w):
flow_dir_taudem = FlowModelConst.w
flow_dir = 16
elif MathClass.floatequal(angle, FlowModelConst.sw):
flow_dir_taudem = FlowModelConst.sw
flow_dir = 8
elif MathClass.floatequal(angle, FlowModelConst.s):
flow_dir_taudem = FlowModelConst.s
flow_dir = 4
elif MathClass.floatequal(angle, FlowModelConst.se):
flow_dir_taudem = FlowModelConst.se
flow_dir = 2
return flow_dir_taudem, flow_dir | python | def check_orthogonal(angle):
"""Check the given Dinf angle based on D8 flow direction encoding code by ArcGIS"""
flow_dir_taudem = -1
flow_dir = -1
if MathClass.floatequal(angle, FlowModelConst.e):
flow_dir_taudem = FlowModelConst.e
flow_dir = 1
elif MathClass.floatequal(angle, FlowModelConst.ne):
flow_dir_taudem = FlowModelConst.ne
flow_dir = 128
elif MathClass.floatequal(angle, FlowModelConst.n):
flow_dir_taudem = FlowModelConst.n
flow_dir = 64
elif MathClass.floatequal(angle, FlowModelConst.nw):
flow_dir_taudem = FlowModelConst.nw
flow_dir = 32
elif MathClass.floatequal(angle, FlowModelConst.w):
flow_dir_taudem = FlowModelConst.w
flow_dir = 16
elif MathClass.floatequal(angle, FlowModelConst.sw):
flow_dir_taudem = FlowModelConst.sw
flow_dir = 8
elif MathClass.floatequal(angle, FlowModelConst.s):
flow_dir_taudem = FlowModelConst.s
flow_dir = 4
elif MathClass.floatequal(angle, FlowModelConst.se):
flow_dir_taudem = FlowModelConst.se
flow_dir = 2
return flow_dir_taudem, flow_dir | [
"def",
"check_orthogonal",
"(",
"angle",
")",
":",
"flow_dir_taudem",
"=",
"-",
"1",
"flow_dir",
"=",
"-",
"1",
"if",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"e",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"e",
"flow_dir",
"=",
"1",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"ne",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"ne",
"flow_dir",
"=",
"128",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"n",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"n",
"flow_dir",
"=",
"64",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"nw",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"nw",
"flow_dir",
"=",
"32",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"w",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"w",
"flow_dir",
"=",
"16",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"sw",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"sw",
"flow_dir",
"=",
"8",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"s",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"s",
"flow_dir",
"=",
"4",
"elif",
"MathClass",
".",
"floatequal",
"(",
"angle",
",",
"FlowModelConst",
".",
"se",
")",
":",
"flow_dir_taudem",
"=",
"FlowModelConst",
".",
"se",
"flow_dir",
"=",
"2",
"return",
"flow_dir_taudem",
",",
"flow_dir"
] | Check the given Dinf angle based on D8 flow direction encoding code by ArcGIS | [
"Check",
"the",
"given",
"Dinf",
"angle",
"based",
"on",
"D8",
"flow",
"direction",
"encoding",
"code",
"by",
"ArcGIS"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/postTauDEM.py#L37-L65 | train |
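
The row above maps a TauDEM Dinf angle onto the ArcGIS D8 power-of-two encoding (E=1, NE=128, N=64, NW=32, W=16, SW=8, S=4, SE=2). Below is a minimal standalone sketch of the same mapping; the concrete angle values and the tolerance are assumptions, since `FlowModelConst` and `MathClass.floatequal` are not shown in this row — TauDEM conventionally measures Dinf in radians counter-clockwise from east.

```python
import math

# Standalone sketch, not the PyGeoC implementation: assumes FlowModelConst
# stores the eight cardinal/diagonal Dinf angles in radians counter-clockwise
# from east, and that MathClass.floatequal is a tolerance-based comparison.
ANGLE_TO_D8 = [
    (0.0,             1),    # east
    (math.pi / 4,     128),  # northeast
    (math.pi / 2,     64),   # north
    (3 * math.pi / 4, 32),   # northwest
    (math.pi,         16),   # west
    (5 * math.pi / 4, 8),    # southwest
    (3 * math.pi / 2, 4),    # south
    (7 * math.pi / 4, 2),    # southeast
]

def check_orthogonal(angle, tol=1e-6):
    """Return (taudem_angle, arcgis_code), or (-1, -1) if not orthogonal."""
    for ref_angle, arcgis_code in ANGLE_TO_D8:
        if math.isclose(angle, ref_angle, abs_tol=tol):
            return ref_angle, arcgis_code
    return -1, -1

assert check_orthogonal(math.pi / 2) == (math.pi / 2, 64)  # due north
assert check_orthogonal(1.23) == (-1, -1)                  # non-orthogonal angle
```
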
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/instruments/instrument_dummy.py | Plant.start | def start(self, *args, **kwargs):
"""
start the instrument thread
"""
self._stop = False
super(Plant, self).start(*args, **kwargs) | python | def start(self, *args, **kwargs):
"""
start the instrument thread
"""
self._stop = False
super(Plant, self).start(*args, **kwargs) | [
"def",
"start",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_stop",
"=",
"False",
"super",
"(",
"Plant",
",",
"self",
")",
".",
"start",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | start the instrument thread | [
"start",
"the",
"instrument",
"thread"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/instruments/instrument_dummy.py#L121-L127 | train |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/instruments/instrument_dummy.py | Plant.quit | def quit(self, *args, **kwargs): # real signature unknown
"""
quit the instrument thread
"""
self.stop()
self._stop = True
self.msleep(2* int(1e3 / self.settings['update frequency']))
super(Plant, self).quit(*args, **kwargs) | python | def quit(self, *args, **kwargs): # real signature unknown
"""
quit the instrument thread
"""
self.stop()
self._stop = True
self.msleep(2* int(1e3 / self.settings['update frequency']))
super(Plant, self).quit(*args, **kwargs) | [
"def",
"quit",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# real signature unknown",
"self",
".",
"stop",
"(",
")",
"self",
".",
"_stop",
"=",
"True",
"self",
".",
"msleep",
"(",
"2",
"*",
"int",
"(",
"1e3",
"/",
"self",
".",
"settings",
"[",
"'update frequency'",
"]",
")",
")",
"super",
"(",
"Plant",
",",
"self",
")",
".",
"quit",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | quit the instrument thread | [
"quit",
"the",
"instrument",
"thread"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/instruments/instrument_dummy.py#L130-L137 | train |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/instruments/instrument_dummy.py | PIControler.controler_output | def controler_output(self, current_value):
"""
Calculate PI output value for given reference input and feedback
"""
set_point = self.settings['set_point']
Kp = self.settings['gains']['proportional']
Ki = self.settings['gains']['integral']
output_range = self.settings['output_range']
time_step = self.settings['time_step']
error_new = set_point - current_value
print(('PD- error:\t', error_new, Ki, Kp, time_step))
#proportional action
self.u_P = Kp * error_new * time_step
print(('PD- self.u_P:\t', self.u_P, self.u_I))
#integral action
self.u_I += Kp * Ki * (error_new + self.error) / 2.0 * time_step
self.error = error_new
print(('PD- self.u_P:\t', self.u_P, self.u_I))
# anti-windup
if self.u_P + self.u_I > output_range['max']:
self.u_I = output_range['max']-self.u_P
if self.u_P + self.u_I < output_range['min']:
self.u_I = output_range['min']-self.u_P
output = self.u_P + self.u_I
print(('PD- output:\t', output))
return output | python | def controler_output(self, current_value):
"""
Calculate PI output value for given reference input and feedback
"""
set_point = self.settings['set_point']
Kp = self.settings['gains']['proportional']
Ki = self.settings['gains']['integral']
output_range = self.settings['output_range']
time_step = self.settings['time_step']
error_new = set_point - current_value
print(('PD- error:\t', error_new, Ki, Kp, time_step))
#proportional action
self.u_P = Kp * error_new * time_step
print(('PD- self.u_P:\t', self.u_P, self.u_I))
#integral action
self.u_I += Kp * Ki * (error_new + self.error) / 2.0 * time_step
self.error = error_new
print(('PD- self.u_P:\t', self.u_P, self.u_I))
# anti-windup
if self.u_P + self.u_I > output_range['max']:
self.u_I = output_range['max']-self.u_P
if self.u_P + self.u_I < output_range['min']:
self.u_I = output_range['min']-self.u_P
output = self.u_P + self.u_I
print(('PD- output:\t', output))
return output | [
"def",
"controler_output",
"(",
"self",
",",
"current_value",
")",
":",
"set_point",
"=",
"self",
".",
"settings",
"[",
"'set_point'",
"]",
"Kp",
"=",
"self",
".",
"settings",
"[",
"'gains'",
"]",
"[",
"'proportional'",
"]",
"Ki",
"=",
"self",
".",
"settings",
"[",
"'gains'",
"]",
"[",
"'integral'",
"]",
"output_range",
"=",
"self",
".",
"settings",
"[",
"'output_range'",
"]",
"time_step",
"=",
"self",
".",
"settings",
"[",
"'time_step'",
"]",
"error_new",
"=",
"set_point",
"-",
"current_value",
"print",
"(",
"(",
"'PD- error:\\t'",
",",
"error_new",
",",
"Ki",
",",
"Kp",
",",
"time_step",
")",
")",
"#proportional action",
"self",
".",
"u_P",
"=",
"Kp",
"*",
"error_new",
"*",
"time_step",
"print",
"(",
"(",
"'PD- self.u_P:\\t'",
",",
"self",
".",
"u_P",
",",
"self",
".",
"u_I",
")",
")",
"#integral action",
"self",
".",
"u_I",
"+=",
"Kp",
"*",
"Ki",
"*",
"(",
"error_new",
"+",
"self",
".",
"error",
")",
"/",
"2.0",
"*",
"time_step",
"self",
".",
"error",
"=",
"error_new",
"print",
"(",
"(",
"'PD- self.u_P:\\t'",
",",
"self",
".",
"u_P",
",",
"self",
".",
"u_I",
")",
")",
"# anti-windup",
"if",
"self",
".",
"u_P",
"+",
"self",
".",
"u_I",
">",
"output_range",
"[",
"'max'",
"]",
":",
"self",
".",
"u_I",
"=",
"output_range",
"[",
"'max'",
"]",
"-",
"self",
".",
"u_P",
"if",
"self",
".",
"u_P",
"+",
"self",
".",
"u_I",
"<",
"output_range",
"[",
"'min'",
"]",
":",
"self",
".",
"u_I",
"=",
"output_range",
"[",
"'min'",
"]",
"-",
"self",
".",
"u_P",
"output",
"=",
"self",
".",
"u_P",
"+",
"self",
".",
"u_I",
"print",
"(",
"(",
"'PD- output:\\t'",
",",
"output",
")",
")",
"return",
"output"
] | Calculate PI output value for given reference input and feedback | [
"Calculate",
"PI",
"output",
"value",
"for",
"given",
"reference",
"input",
"and",
"feedback"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/instruments/instrument_dummy.py#L226-L259 | train |
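
The `controler_output` row above implements one discrete PI step with trapezoidal integration of the error and clamping-style anti-windup (the integral term is trimmed so the sum stays inside the output range). Two observations: the proportional term is also scaled by `time_step`, which departs from the textbook form u_P = Kp·e, and the debug prints are labelled `PD-` although this is a PI controller. A side-effect-free sketch of the same update, mirroring those choices rather than correcting them:

```python
def pi_step(prev_error, u_i, current_value, *, set_point, kp, ki, dt,
            out_min, out_max):
    """One discrete PI update with trapezoidal integration and anti-windup.

    Sketch only: mirrors the row above (including its dt-scaled P term),
    returning new state instead of mutating instance attributes.
    """
    error = set_point - current_value
    u_p = kp * error * dt                              # P term (dt-scaled, as above)
    u_i += kp * ki * (error + prev_error) / 2.0 * dt   # trapezoidal I term
    # anti-windup: clamp the integral so u_p + u_i stays inside the range
    u_i = min(u_i, out_max - u_p)
    u_i = max(u_i, out_min - u_p)
    return u_p + u_i, error, u_i

out, err, u_i = pi_step(0.0, 0.0, current_value=0.8, set_point=1.0,
                        kp=2.0, ki=0.5, dt=0.1, out_min=-1.0, out_max=1.0)
print(out)  # 0.05
```
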
rosshamish/catan-py | catan/boardbuilder.py | get_opts | def get_opts(opts):
"""
Validate options and apply defaults for options not supplied.
:param opts: dictionary mapping str->str.
:return: dictionary mapping str->Opt. All possible keys are present.
"""
defaults = {
'board': None,
'terrain': Opt.random,
'numbers': Opt.preset,
'ports': Opt.preset,
'pieces': Opt.preset,
'players': Opt.preset,
}
_opts = defaults.copy()
if opts is None:
opts = dict()
try:
for key, val in opts.copy().items():
if key == 'board':
# board is a string, not a regular opt, and gets special handling
# in _read_tiles_from_string
continue
opts[key] = Opt(val)
_opts.update(opts)
except Exception:
raise ValueError('Invalid options={}'.format(opts))
logging.debug('used defaults=\n{}\n on opts=\n{}\nreturned total opts=\n{}'.format(
pprint.pformat(defaults),
pprint.pformat(opts),
pprint.pformat(_opts)))
return _opts | python | def get_opts(opts):
"""
Validate options and apply defaults for options not supplied.
:param opts: dictionary mapping str->str.
:return: dictionary mapping str->Opt. All possible keys are present.
"""
defaults = {
'board': None,
'terrain': Opt.random,
'numbers': Opt.preset,
'ports': Opt.preset,
'pieces': Opt.preset,
'players': Opt.preset,
}
_opts = defaults.copy()
if opts is None:
opts = dict()
try:
for key, val in opts.copy().items():
if key == 'board':
# board is a string, not a regular opt, and gets special handling
# in _read_tiles_from_string
continue
opts[key] = Opt(val)
_opts.update(opts)
except Exception:
raise ValueError('Invalid options={}'.format(opts))
logging.debug('used defaults=\n{}\n on opts=\n{}\nreturned total opts=\n{}'.format(
pprint.pformat(defaults),
pprint.pformat(opts),
pprint.pformat(_opts)))
return _opts | [
"def",
"get_opts",
"(",
"opts",
")",
":",
"defaults",
"=",
"{",
"'board'",
":",
"None",
",",
"'terrain'",
":",
"Opt",
".",
"random",
",",
"'numbers'",
":",
"Opt",
".",
"preset",
",",
"'ports'",
":",
"Opt",
".",
"preset",
",",
"'pieces'",
":",
"Opt",
".",
"preset",
",",
"'players'",
":",
"Opt",
".",
"preset",
",",
"}",
"_opts",
"=",
"defaults",
".",
"copy",
"(",
")",
"if",
"opts",
"is",
"None",
":",
"opts",
"=",
"dict",
"(",
")",
"try",
":",
"for",
"key",
",",
"val",
"in",
"opts",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'board'",
":",
"# board is a string, not a regular opt, and gets special handling",
"# in _read_tiles_from_string",
"continue",
"opts",
"[",
"key",
"]",
"=",
"Opt",
"(",
"val",
")",
"_opts",
".",
"update",
"(",
"opts",
")",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"'Invalid options={}'",
".",
"format",
"(",
"opts",
")",
")",
"logging",
".",
"debug",
"(",
"'used defaults=\\n{}\\n on opts=\\n{}\\nreturned total opts=\\n{}'",
".",
"format",
"(",
"pprint",
".",
"pformat",
"(",
"defaults",
")",
",",
"pprint",
".",
"pformat",
"(",
"opts",
")",
",",
"pprint",
".",
"pformat",
"(",
"_opts",
")",
")",
")",
"return",
"_opts"
] | Validate options and apply defaults for options not supplied.
:param opts: dictionary mapping str->str.
:return: dictionary mapping str->Opt. All possible keys are present. | [
"Validate",
"options",
"and",
"apply",
"defaults",
"for",
"options",
"not",
"supplied",
"."
] | 120438a8f16e39c13322c5d5930e1064e1d3f4be | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/boardbuilder.py#L40-L72 | train |
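
`get_opts` follows a common validate-and-merge pattern: copy the defaults, coerce each user-supplied string through the `Opt` enum constructor (which raises `ValueError` for unknown names), and pass the free-form `board` key through untouched. A self-contained miniature of the pattern — the `Opt` members here are assumptions inferred from how the row uses them, since the enum itself is defined elsewhere in the package:

```python
from enum import Enum

class Opt(Enum):            # assumed members, mirroring catan.boardbuilder
    empty = 'empty'
    random = 'random'
    preset = 'preset'
    debug = 'debug'

DEFAULTS = {'board': None, 'terrain': Opt.random, 'numbers': Opt.preset}

def get_opts(opts=None):
    merged = DEFAULTS.copy()
    for key, val in (opts or {}).items():
        if key == 'board':      # free-form board string, not a regular Opt
            merged[key] = val
            continue
        merged[key] = Opt(val)  # ValueError for unrecognized option names
    return merged

print(get_opts({'numbers': 'random'}))
# {'board': None, 'terrain': <Opt.random: 'random'>, 'numbers': <Opt.random: 'random'>}
```
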
rosshamish/catan-py | catan/boardbuilder.py | _get_tiles | def _get_tiles(board=None, terrain=None, numbers=None):
"""
Generate a list of tiles using the given terrain and numbers options.
terrain options supported:
- Opt.empty -> all tiles are desert
- Opt.random -> tiles are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
numbers options supported:
- Opt.empty -> no tiles have numbers
- Opt.random -> numbers are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
:param terrain_opts: Opt
:param numbers_opts: Opt
:return: list(Tile)
"""
if board is not None:
# we have a board given, ignore the terrain and numbers opts and log warnings
# if they were supplied
tiles = _read_tiles_from_string(board)
else:
# we are being asked to generate a board
tiles = _generate_tiles(terrain, numbers)
return tiles | python | def _get_tiles(board=None, terrain=None, numbers=None):
"""
Generate a list of tiles using the given terrain and numbers options.
terrain options supported:
- Opt.empty -> all tiles are desert
- Opt.random -> tiles are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
numbers options supported:
- Opt.empty -> no tiles have numbers
- Opt.random -> numbers are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
:param terrain_opts: Opt
:param numbers_opts: Opt
:return: list(Tile)
"""
if board is not None:
# we have a board given, ignore the terrain and numbers opts and log warnings
# if they were supplied
tiles = _read_tiles_from_string(board)
else:
# we are being asked to generate a board
tiles = _generate_tiles(terrain, numbers)
return tiles | [
"def",
"_get_tiles",
"(",
"board",
"=",
"None",
",",
"terrain",
"=",
"None",
",",
"numbers",
"=",
"None",
")",
":",
"if",
"board",
"is",
"not",
"None",
":",
"# we have a board given, ignore the terrain and numbers opts and log warnings",
"# if they were supplied",
"tiles",
"=",
"_read_tiles_from_string",
"(",
"board",
")",
"else",
":",
"# we are being asked to generate a board",
"tiles",
"=",
"_generate_tiles",
"(",
"terrain",
",",
"numbers",
")",
"return",
"tiles"
] | Generate a list of tiles using the given terrain and numbers options.
terrain options supported:
- Opt.empty -> all tiles are desert
- Opt.random -> tiles are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
numbers options supported:
- Opt.empty -> no tiles have numbers
- Opt.random -> numbers are randomized
- Opt.preset ->
- Opt.debug -> alias for Opt.random
:param terrain_opts: Opt
:param numbers_opts: Opt
:return: list(Tile) | [
"Generate",
"a",
"list",
"of",
"tiles",
"using",
"the",
"given",
"terrain",
"and",
"numbers",
"options",
"."
] | 120438a8f16e39c13322c5d5930e1064e1d3f4be | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/boardbuilder.py#L112-L140 | train |
rosshamish/catan-py | catan/boardbuilder.py | _get_ports | def _get_ports(port_opts):
"""
Generate a list of ports using the given options.
port options supported:
- Opt.empty ->
- Opt.random ->
- Opt.preset -> ports are in default locations
- Opt.debug -> alias for Opt.preset
:param port_opts: Opt
:return: list(Port)
"""
if port_opts in [Opt.preset, Opt.debug]:
_preset_ports = [(1, 'NW', catan.board.PortType.any3),
(2, 'W', catan.board.PortType.wood),
(4, 'W', catan.board.PortType.brick),
(5, 'SW', catan.board.PortType.any3),
(6, 'SE', catan.board.PortType.any3),
(8, 'SE', catan.board.PortType.sheep),
(9, 'E', catan.board.PortType.any3),
(10, 'NE', catan.board.PortType.ore),
(12, 'NE', catan.board.PortType.wheat)]
return [catan.board.Port(tile, dir, port_type)
for tile, dir, port_type in _preset_ports]
elif port_opts in [Opt.empty, Opt.random]:
logging.warning('{} option not yet implemented'.format(port_opts))
return [] | python | def _get_ports(port_opts):
"""
Generate a list of ports using the given options.
port options supported:
- Opt.empty ->
- Opt.random ->
- Opt.preset -> ports are in default locations
- Opt.debug -> alias for Opt.preset
:param port_opts: Opt
:return: list(Port)
"""
if port_opts in [Opt.preset, Opt.debug]:
_preset_ports = [(1, 'NW', catan.board.PortType.any3),
(2, 'W', catan.board.PortType.wood),
(4, 'W', catan.board.PortType.brick),
(5, 'SW', catan.board.PortType.any3),
(6, 'SE', catan.board.PortType.any3),
(8, 'SE', catan.board.PortType.sheep),
(9, 'E', catan.board.PortType.any3),
(10, 'NE', catan.board.PortType.ore),
(12, 'NE', catan.board.PortType.wheat)]
return [catan.board.Port(tile, dir, port_type)
for tile, dir, port_type in _preset_ports]
elif port_opts in [Opt.empty, Opt.random]:
logging.warning('{} option not yet implemented'.format(port_opts))
return [] | [
"def",
"_get_ports",
"(",
"port_opts",
")",
":",
"if",
"port_opts",
"in",
"[",
"Opt",
".",
"preset",
",",
"Opt",
".",
"debug",
"]",
":",
"_preset_ports",
"=",
"[",
"(",
"1",
",",
"'NW'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"any3",
")",
",",
"(",
"2",
",",
"'W'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"wood",
")",
",",
"(",
"4",
",",
"'W'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"brick",
")",
",",
"(",
"5",
",",
"'SW'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"any3",
")",
",",
"(",
"6",
",",
"'SE'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"any3",
")",
",",
"(",
"8",
",",
"'SE'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"sheep",
")",
",",
"(",
"9",
",",
"'E'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"any3",
")",
",",
"(",
"10",
",",
"'NE'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"ore",
")",
",",
"(",
"12",
",",
"'NE'",
",",
"catan",
".",
"board",
".",
"PortType",
".",
"wheat",
")",
"]",
"return",
"[",
"catan",
".",
"board",
".",
"Port",
"(",
"tile",
",",
"dir",
",",
"port_type",
")",
"for",
"tile",
",",
"dir",
",",
"port_type",
"in",
"_preset_ports",
"]",
"elif",
"port_opts",
"in",
"[",
"Opt",
".",
"empty",
",",
"Opt",
".",
"random",
"]",
":",
"logging",
".",
"warning",
"(",
"'{} option not yet implemented'",
".",
"format",
"(",
"port_opts",
")",
")",
"return",
"[",
"]"
] | Generate a list of ports using the given options.
port options supported:
- Opt.empty ->
- Opt.random ->
- Opt.preset -> ports are in default locations
- Opt.debug -> alias for Opt.preset
:param port_opts: Opt
:return: list(Port) | [
"Generate",
"a",
"list",
"of",
"ports",
"using",
"the",
"given",
"options",
"."
] | 120438a8f16e39c13322c5d5930e1064e1d3f4be | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/boardbuilder.py#L231-L258 | train |
rosshamish/catan-py | catan/boardbuilder.py | _get_pieces | def _get_pieces(tiles, ports, players_opts, pieces_opts):
"""
Generate a dictionary of pieces using the given options.
pieces options supported:
- Opt.empty -> no locations have pieces
- Opt.random ->
- Opt.preset -> robber is placed on the first desert found
- Opt.debug -> a variety of pieces are placed around the board
:param tiles: list of tiles from _generate_tiles
:param ports: list of ports from _generate_ports
:param players_opts: Opt
:param pieces_opts: Opt
:return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece
"""
if pieces_opts == Opt.empty:
return dict()
elif pieces_opts == Opt.debug:
players = catan.game.Game.get_debug_players()
return {
(hexgrid.NODE, 0x23): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[0]),
(hexgrid.EDGE, 0x22): catan.pieces.Piece(catan.pieces.PieceType.road, players[0]),
(hexgrid.NODE, 0x67): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[1]),
(hexgrid.EDGE, 0x98): catan.pieces.Piece(catan.pieces.PieceType.road, players[1]),
(hexgrid.NODE, 0x87): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[2]),
(hexgrid.EDGE, 0x89): catan.pieces.Piece(catan.pieces.PieceType.road, players[2]),
(hexgrid.EDGE, 0xA9): catan.pieces.Piece(catan.pieces.PieceType.road, players[3]),
(hexgrid.TILE, 0x77): catan.pieces.Piece(catan.pieces.PieceType.robber, None),
}
elif pieces_opts in (Opt.preset, ):
deserts = filter(lambda tile: tile.terrain == catan.board.Terrain.desert, tiles)
coord = hexgrid.tile_id_to_coord(list(deserts)[0].tile_id)
return {
(hexgrid.TILE, coord): catan.pieces.Piece(catan.pieces.PieceType.robber, None)
}
elif pieces_opts in (Opt.random, ):
logging.warning('{} option not yet implemented'.format(pieces_opts)) | python | def _get_pieces(tiles, ports, players_opts, pieces_opts):
"""
Generate a dictionary of pieces using the given options.
pieces options supported:
- Opt.empty -> no locations have pieces
- Opt.random ->
- Opt.preset -> robber is placed on the first desert found
- Opt.debug -> a variety of pieces are placed around the board
:param tiles: list of tiles from _generate_tiles
:param ports: list of ports from _generate_ports
:param players_opts: Opt
:param pieces_opts: Opt
:return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece
"""
if pieces_opts == Opt.empty:
return dict()
elif pieces_opts == Opt.debug:
players = catan.game.Game.get_debug_players()
return {
(hexgrid.NODE, 0x23): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[0]),
(hexgrid.EDGE, 0x22): catan.pieces.Piece(catan.pieces.PieceType.road, players[0]),
(hexgrid.NODE, 0x67): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[1]),
(hexgrid.EDGE, 0x98): catan.pieces.Piece(catan.pieces.PieceType.road, players[1]),
(hexgrid.NODE, 0x87): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[2]),
(hexgrid.EDGE, 0x89): catan.pieces.Piece(catan.pieces.PieceType.road, players[2]),
(hexgrid.EDGE, 0xA9): catan.pieces.Piece(catan.pieces.PieceType.road, players[3]),
(hexgrid.TILE, 0x77): catan.pieces.Piece(catan.pieces.PieceType.robber, None),
}
elif pieces_opts in (Opt.preset, ):
deserts = filter(lambda tile: tile.terrain == catan.board.Terrain.desert, tiles)
coord = hexgrid.tile_id_to_coord(list(deserts)[0].tile_id)
return {
(hexgrid.TILE, coord): catan.pieces.Piece(catan.pieces.PieceType.robber, None)
}
elif pieces_opts in (Opt.random, ):
logging.warning('{} option not yet implemented'.format(pieces_opts)) | [
"def",
"_get_pieces",
"(",
"tiles",
",",
"ports",
",",
"players_opts",
",",
"pieces_opts",
")",
":",
"if",
"pieces_opts",
"==",
"Opt",
".",
"empty",
":",
"return",
"dict",
"(",
")",
"elif",
"pieces_opts",
"==",
"Opt",
".",
"debug",
":",
"players",
"=",
"catan",
".",
"game",
".",
"Game",
".",
"get_debug_players",
"(",
")",
"return",
"{",
"(",
"hexgrid",
".",
"NODE",
",",
"0x23",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"settlement",
",",
"players",
"[",
"0",
"]",
")",
",",
"(",
"hexgrid",
".",
"EDGE",
",",
"0x22",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"road",
",",
"players",
"[",
"0",
"]",
")",
",",
"(",
"hexgrid",
".",
"NODE",
",",
"0x67",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"settlement",
",",
"players",
"[",
"1",
"]",
")",
",",
"(",
"hexgrid",
".",
"EDGE",
",",
"0x98",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"road",
",",
"players",
"[",
"1",
"]",
")",
",",
"(",
"hexgrid",
".",
"NODE",
",",
"0x87",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"settlement",
",",
"players",
"[",
"2",
"]",
")",
",",
"(",
"hexgrid",
".",
"EDGE",
",",
"0x89",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"road",
",",
"players",
"[",
"2",
"]",
")",
",",
"(",
"hexgrid",
".",
"EDGE",
",",
"0xA9",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"road",
",",
"players",
"[",
"3",
"]",
")",
",",
"(",
"hexgrid",
".",
"TILE",
",",
"0x77",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"robber",
",",
"None",
")",
",",
"}",
"elif",
"pieces_opts",
"in",
"(",
"Opt",
".",
"preset",
",",
")",
":",
"deserts",
"=",
"filter",
"(",
"lambda",
"tile",
":",
"tile",
".",
"terrain",
"==",
"catan",
".",
"board",
".",
"Terrain",
".",
"desert",
",",
"tiles",
")",
"coord",
"=",
"hexgrid",
".",
"tile_id_to_coord",
"(",
"list",
"(",
"deserts",
")",
"[",
"0",
"]",
".",
"tile_id",
")",
"return",
"{",
"(",
"hexgrid",
".",
"TILE",
",",
"coord",
")",
":",
"catan",
".",
"pieces",
".",
"Piece",
"(",
"catan",
".",
"pieces",
".",
"PieceType",
".",
"robber",
",",
"None",
")",
"}",
"elif",
"pieces_opts",
"in",
"(",
"Opt",
".",
"random",
",",
")",
":",
"logging",
".",
"warning",
"(",
"'{} option not yet implemented'",
".",
"format",
"(",
"pieces_opts",
")",
")"
] | Generate a dictionary of pieces using the given options.
pieces options supported:
- Opt.empty -> no locations have pieces
- Opt.random ->
- Opt.preset -> robber is placed on the first desert found
- Opt.debug -> a variety of pieces are placed around the board
:param tiles: list of tiles from _generate_tiles
:param ports: list of ports from _generate_ports
:param players_opts: Opt
:param pieces_opts: Opt
:return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece | [
"Generate",
"a",
"dictionary",
"of",
"pieces",
"using",
"the",
"given",
"options",
"."
] | 120438a8f16e39c13322c5d5930e1064e1d3f4be | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/boardbuilder.py#L261-L298 | train |
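
One fragility in the preset branch above: `list(deserts)[0]` raises an `IndexError` if the tile list happens to contain no desert. A slightly more defensive version of that branch, assuming the same `catan.board`, `catan.pieces`, and `hexgrid` API used in the row:

```python
import catan.board
import catan.pieces
import hexgrid

def preset_robber(tiles):
    """Place the robber on the first desert tile; fail loudly if none exists."""
    desert = next((t for t in tiles
                   if t.terrain == catan.board.Terrain.desert), None)
    if desert is None:
        raise ValueError('preset pieces require at least one desert tile')
    coord = hexgrid.tile_id_to_coord(desert.tile_id)
    return {(hexgrid.TILE, coord):
            catan.pieces.Piece(catan.pieces.PieceType.robber, None)}
```
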
nmdp-bioinformatics/SeqAnn | seqann/feature_client/apis/features_api.py | FeaturesApi.create_feature | def create_feature(self, **kwargs):
"""
Create an enumerated sequence feature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_feature(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FeatureRequest body:
:return: Feature
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_feature_with_http_info(**kwargs)
else:
(data) = self.create_feature_with_http_info(**kwargs)
return data | python | def create_feature(self, **kwargs):
"""
Create an enumerated sequence feature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_feature(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FeatureRequest body:
:return: Feature
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_feature_with_http_info(**kwargs)
else:
(data) = self.create_feature_with_http_info(**kwargs)
return data | [
"def",
"create_feature",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"create_feature_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"create_feature_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | Create an enumerated sequence feature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_feature(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FeatureRequest body:
:return: Feature
If the method is called asynchronously,
returns the request thread. | [
"Create",
"an",
"enumerated",
"sequence",
"feature"
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/feature_client/apis/features_api.py#L53-L78 | train |
nmdp-bioinformatics/SeqAnn | seqann/feature_client/apis/features_api.py | FeaturesApi.list_features | def list_features(self, locus, **kwargs):
"""
List the enumerated sequence features at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features(locus, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_with_http_info(locus, **kwargs)
else:
(data) = self.list_features_with_http_info(locus, **kwargs)
return data | python | def list_features(self, locus, **kwargs):
"""
List the enumerated sequence features at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features(locus, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_with_http_info(locus, **kwargs)
else:
(data) = self.list_features_with_http_info(locus, **kwargs)
return data | [
"def",
"list_features",
"(",
"self",
",",
"locus",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"list_features_with_http_info",
"(",
"locus",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_features_with_http_info",
"(",
"locus",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | List the enumerated sequence features at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features(locus, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread. | [
"List",
"the",
"enumerated",
"sequence",
"features",
"at",
"a",
"locus"
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/feature_client/apis/features_api.py#L392-L417 | train |
nmdp-bioinformatics/SeqAnn | seqann/feature_client/apis/features_api.py | FeaturesApi.list_features_0 | def list_features_0(self, locus, term, **kwargs):
"""
List the enumerated sequence features matching a term at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_0(locus, term, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_0_with_http_info(locus, term, **kwargs)
else:
(data) = self.list_features_0_with_http_info(locus, term, **kwargs)
return data | python | def list_features_0(self, locus, term, **kwargs):
"""
List the enumerated sequence features matching a term at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_0(locus, term, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_0_with_http_info(locus, term, **kwargs)
else:
(data) = self.list_features_0_with_http_info(locus, term, **kwargs)
return data | [
"def",
"list_features_0",
"(",
"self",
",",
"locus",
",",
"term",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"list_features_0_with_http_info",
"(",
"locus",
",",
"term",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_features_0_with_http_info",
"(",
"locus",
",",
"term",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | List the enumerated sequence features matching a term at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_0(locus, term, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread. | [
"List",
"the",
"enumerated",
"sequence",
"features",
"matching",
"a",
"term",
"at",
"a",
"locus"
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/feature_client/apis/features_api.py#L496-L522 | train |
nmdp-bioinformatics/SeqAnn | seqann/feature_client/apis/features_api.py | FeaturesApi.list_features_1 | def list_features_1(self, locus, term, rank, **kwargs):
"""
List the enumerated sequence features matching a term and rank at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_1(locus, term, rank, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:param int rank: feature rank, must be at least 1 (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_1_with_http_info(locus, term, rank, **kwargs)
else:
(data) = self.list_features_1_with_http_info(locus, term, rank, **kwargs)
return data | python | def list_features_1(self, locus, term, rank, **kwargs):
"""
List the enumerated sequence features matching a term and rank at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_1(locus, term, rank, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:param int rank: feature rank, must be at least 1 (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_features_1_with_http_info(locus, term, rank, **kwargs)
else:
(data) = self.list_features_1_with_http_info(locus, term, rank, **kwargs)
return data | [
"def",
"list_features_1",
"(",
"self",
",",
"locus",
",",
"term",
",",
"rank",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"list_features_1_with_http_info",
"(",
"locus",
",",
"term",
",",
"rank",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_features_1_with_http_info",
"(",
"locus",
",",
"term",
",",
"rank",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | List the enumerated sequence features matching a term and rank at a locus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_features_1(locus, term, rank, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str locus: locus name or URI (required)
:param str term: Sequence Ontology (SO) term name, accession, or URI (required)
:param int rank: feature rank, must be at least 1 (required)
:return: list[Feature]
If the method is called asynchronously,
returns the request thread. | [
"List",
"the",
"enumerated",
"sequence",
"features",
"matching",
"a",
"term",
"and",
"rank",
"at",
"a",
"locus"
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/feature_client/apis/features_api.py#L607-L634 | train |
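
All four `FeaturesApi` methods above share the swagger-codegen calling convention: call the method synchronously for a return value, or pass `callback=` to dispatch the request on a worker and receive the result in the callback. A hypothetical usage sketch — the locus and term values below are illustrative only, as is the default client configuration:

```python
from seqann.feature_client.apis.features_api import FeaturesApi

api = FeaturesApi()  # assumes a default ApiClient pointing at the service

# Synchronous: blocks and returns list[Feature]
features = api.list_features_1('HLA-A', 'exon', 1)  # illustrative arguments

# Asynchronous: per the docstrings above, returns the request thread and
# delivers the result to the callback
def on_done(features):
    print(len(features), 'features')

thread = api.list_features_1('HLA-A', 'exon', 1, callback=on_done)
```
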
tryfer/tryfer | tryfer/http.py | TracingAgent.request | def request(self, method, uri, headers=None, bodyProducer=None):
"""
Send a client request following HTTP redirects.
@see: L{Agent.request}.
"""
if self._parent_trace is None:
trace = Trace(method)
else:
trace = self._parent_trace.child(method)
if self._endpoint is not None:
trace.set_endpoint(self._endpoint)
if headers is None:
headers = Headers({})
# These headers are based on the headers used by finagle's tracing
# http Codec.
#
# https://github.com/twitter/finagle/blob/master/finagle-http/
#
# Currently not implemented are X-B3-Sampled and X-B3-Flags
# Tryfer's underlying Trace implementation has no notion of a Sampled
# trace and I haven't figured out what flags are for.
headers.setRawHeaders('X-B3-TraceId', [hex_str(trace.trace_id)])
headers.setRawHeaders('X-B3-SpanId', [hex_str(trace.span_id)])
if trace.parent_span_id is not None:
headers.setRawHeaders('X-B3-ParentSpanId',
[hex_str(trace.parent_span_id)])
        # Similar to the headers above we use the annotation 'http.uri'
# because that is the standard set forth in the finagle http Codec.
trace.record(Annotation.string('http.uri', uri))
trace.record(Annotation.client_send())
def _finished(resp):
# TODO: It may be advantageous here to return a wrapped response
            # whose deliverBody can wrap its protocol and record when the
# application has finished reading the contents.
trace.record(Annotation.string(
'http.responsecode',
'{0} {1}'.format(resp.code, resp.phrase)))
trace.record(Annotation.client_recv())
return resp
d = self._agent.request(method, uri, headers, bodyProducer)
d.addBoth(_finished)
return d | python | def request(self, method, uri, headers=None, bodyProducer=None):
"""
Send a client request following HTTP redirects.
@see: L{Agent.request}.
"""
if self._parent_trace is None:
trace = Trace(method)
else:
trace = self._parent_trace.child(method)
if self._endpoint is not None:
trace.set_endpoint(self._endpoint)
if headers is None:
headers = Headers({})
# These headers are based on the headers used by finagle's tracing
# http Codec.
#
# https://github.com/twitter/finagle/blob/master/finagle-http/
#
# Currently not implemented are X-B3-Sampled and X-B3-Flags
# Tryfer's underlying Trace implementation has no notion of a Sampled
# trace and I haven't figured out what flags are for.
headers.setRawHeaders('X-B3-TraceId', [hex_str(trace.trace_id)])
headers.setRawHeaders('X-B3-SpanId', [hex_str(trace.span_id)])
if trace.parent_span_id is not None:
headers.setRawHeaders('X-B3-ParentSpanId',
[hex_str(trace.parent_span_id)])
        # Similar to the headers above we use the annotation 'http.uri'
# because that is the standard set forth in the finagle http Codec.
trace.record(Annotation.string('http.uri', uri))
trace.record(Annotation.client_send())
def _finished(resp):
# TODO: It may be advantageous here to return a wrapped response
            # whose deliverBody can wrap its protocol and record when the
# application has finished reading the contents.
trace.record(Annotation.string(
'http.responsecode',
'{0} {1}'.format(resp.code, resp.phrase)))
trace.record(Annotation.client_recv())
return resp
d = self._agent.request(method, uri, headers, bodyProducer)
d.addBoth(_finished)
return d | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"uri",
",",
"headers",
"=",
"None",
",",
"bodyProducer",
"=",
"None",
")",
":",
"if",
"self",
".",
"_parent_trace",
"is",
"None",
":",
"trace",
"=",
"Trace",
"(",
"method",
")",
"else",
":",
"trace",
"=",
"self",
".",
"_parent_trace",
".",
"child",
"(",
"method",
")",
"if",
"self",
".",
"_endpoint",
"is",
"not",
"None",
":",
"trace",
".",
"set_endpoint",
"(",
"self",
".",
"_endpoint",
")",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"Headers",
"(",
"{",
"}",
")",
"# These headers are based on the headers used by finagle's tracing",
"# http Codec.",
"#",
"# https://github.com/twitter/finagle/blob/master/finagle-http/",
"#",
"# Currently not implemented are X-B3-Sampled and X-B3-Flags",
"# Tryfer's underlying Trace implementation has no notion of a Sampled",
"# trace and I haven't figured out what flags are for.",
"headers",
".",
"setRawHeaders",
"(",
"'X-B3-TraceId'",
",",
"[",
"hex_str",
"(",
"trace",
".",
"trace_id",
")",
"]",
")",
"headers",
".",
"setRawHeaders",
"(",
"'X-B3-SpanId'",
",",
"[",
"hex_str",
"(",
"trace",
".",
"span_id",
")",
"]",
")",
"if",
"trace",
".",
"parent_span_id",
"is",
"not",
"None",
":",
"headers",
".",
"setRawHeaders",
"(",
"'X-B3-ParentSpanId'",
",",
"[",
"hex_str",
"(",
"trace",
".",
"parent_span_id",
")",
"]",
")",
"# Similar to the headers above we use the annotation 'http.uri' for",
"# because that is the standard set forth in the finagle http Codec.",
"trace",
".",
"record",
"(",
"Annotation",
".",
"string",
"(",
"'http.uri'",
",",
"uri",
")",
")",
"trace",
".",
"record",
"(",
"Annotation",
".",
"client_send",
"(",
")",
")",
"def",
"_finished",
"(",
"resp",
")",
":",
"# TODO: It may be advantageous here to return a wrapped response",
"# whose deliverBody can wrap it's protocol and record when the",
"# application has finished reading the contents.",
"trace",
".",
"record",
"(",
"Annotation",
".",
"string",
"(",
"'http.responsecode'",
",",
"'{0} {1}'",
".",
"format",
"(",
"resp",
".",
"code",
",",
"resp",
".",
"phrase",
")",
")",
")",
"trace",
".",
"record",
"(",
"Annotation",
".",
"client_recv",
"(",
")",
")",
"return",
"resp",
"d",
"=",
"self",
".",
"_agent",
".",
"request",
"(",
"method",
",",
"uri",
",",
"headers",
",",
"bodyProducer",
")",
"d",
".",
"addBoth",
"(",
"_finished",
")",
"return",
"d"
] | Send a client request following HTTP redirects.
@see: L{Agent.request}. | [
"Send",
"a",
"client",
"request",
"following",
"HTTP",
"redirects",
"."
] | d4aa45b39eab5ce4b06d6343344afb05a0bf8582 | https://github.com/tryfer/tryfer/blob/d4aa45b39eab5ce4b06d6343344afb05a0bf8582/tryfer/http.py#L42-L92 | train |
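
The row above propagates Zipkin B3 trace context over HTTP: `X-B3-TraceId` and `X-B3-SpanId` always, `X-B3-ParentSpanId` only for child spans, with `http.uri`/`http.responsecode` annotations bracketed by client-send and client-receive. A framework-free sketch of just the header construction — the zero-padded 16-digit hex rendering of `hex_str` is an assumption:

```python
def hex_str(n):
    # assumed rendering: 64-bit id as zero-padded lowercase hex
    return '%016x' % n

def b3_headers(trace_id, span_id, parent_span_id=None):
    """Build the Zipkin B3 propagation headers used by TracingAgent."""
    headers = {
        'X-B3-TraceId': hex_str(trace_id),
        'X-B3-SpanId': hex_str(span_id),
    }
    if parent_span_id is not None:   # only child spans carry a parent id
        headers['X-B3-ParentSpanId'] = hex_str(parent_span_id)
    return headers

print(b3_headers(0x1F, 0x2A))
# {'X-B3-TraceId': '000000000000001f', 'X-B3-SpanId': '000000000000002a'}
```
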
bennylope/smartystreets.py | smartystreets/client.py | stringify | def stringify(data):
"""
Ensure all values in the dictionary are strings, except for the value for `candidate` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values except for `candidate` count as a string
"""
def serialize(k, v):
if k == "candidates":
return int(v)
if isinstance(v, numbers.Number):
if k == "zipcode":
# If values are presented as integers then leading digits may be cut off,
# and these are significant for the zipcode. Add them back.
return str(v).zfill(5)
return str(v)
return v
return [{k: serialize(k, v) for k, v in json_dict.items()} for json_dict in data] | python | def stringify(data):
"""
Ensure all values in the dictionary are strings, except for the value for `candidate` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values except for `candidate` count as a string
"""
def serialize(k, v):
if k == "candidates":
return int(v)
if isinstance(v, numbers.Number):
if k == "zipcode":
# If values are presented as integers then leading digits may be cut off,
# and these are significant for the zipcode. Add them back.
return str(v).zfill(5)
return str(v)
return v
return [{k: serialize(k, v) for k, v in json_dict.items()} for json_dict in data] | [
"def",
"stringify",
"(",
"data",
")",
":",
"def",
"serialize",
"(",
"k",
",",
"v",
")",
":",
"if",
"k",
"==",
"\"candidates\"",
":",
"return",
"int",
"(",
"v",
")",
"if",
"isinstance",
"(",
"v",
",",
"numbers",
".",
"Number",
")",
":",
"if",
"k",
"==",
"\"zipcode\"",
":",
"# If values are presented as integers then leading digits may be cut off,",
"# and these are significant for the zipcode. Add them back.",
"return",
"str",
"(",
"v",
")",
".",
"zfill",
"(",
"5",
")",
"return",
"str",
"(",
"v",
")",
"return",
"v",
"return",
"[",
"{",
"k",
":",
"serialize",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"json_dict",
".",
"items",
"(",
")",
"}",
"for",
"json_dict",
"in",
"data",
"]"
] | Ensure all values in the dictionary are strings, except for the value for `candidate` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values except for `candidate` count as a string | [
"Ensure",
"all",
"values",
"in",
"the",
"dictionary",
"are",
"strings",
"except",
"for",
"the",
"value",
"for",
"candidate",
"which",
"should",
"just",
"be",
"an",
"integer",
"."
] | f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69 | https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L59-L82 | train |
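
Two behaviors in `stringify` are easy to miss: `candidates` is coerced to `int` while every other value becomes a string, and numeric zipcodes are zero-padded back to five digits because the leading zeros are significant. Directly from the code above:

```python
>>> stringify([{'zipcode': 2134, 'candidates': '5', 'street': 100}])
[{'zipcode': '02134', 'candidates': 5, 'street': '100'}]
```
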
bennylope/smartystreets.py | smartystreets/client.py | Client.post | def post(self, endpoint, data):
"""
Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content
"""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-standardize-only": "true" if self.standardize else "false",
"x-include-invalid": "true" if self.invalid else "false",
"x-accept-keypair": "true" if self.accept_keypair else "false",
}
if not self.logging:
headers["x-suppress-logging"] = "true"
params = {"auth-id": self.auth_id, "auth-token": self.auth_token}
url = self.BASE_URL + endpoint
response = self.session.post(
url,
json.dumps(stringify(data)),
params=params,
headers=headers,
timeout=self.timeout,
)
if response.status_code == 200:
return response.json()
raise ERROR_CODES.get(response.status_code, SmartyStreetsError) | python | def post(self, endpoint, data):
"""
Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content
"""
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-standardize-only": "true" if self.standardize else "false",
"x-include-invalid": "true" if self.invalid else "false",
"x-accept-keypair": "true" if self.accept_keypair else "false",
}
if not self.logging:
headers["x-suppress-logging"] = "true"
params = {"auth-id": self.auth_id, "auth-token": self.auth_token}
url = self.BASE_URL + endpoint
response = self.session.post(
url,
json.dumps(stringify(data)),
params=params,
headers=headers,
timeout=self.timeout,
)
if response.status_code == 200:
return response.json()
raise ERROR_CODES.get(response.status_code, SmartyStreetsError) | [
"def",
"post",
"(",
"self",
",",
"endpoint",
",",
"data",
")",
":",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/json\"",
",",
"\"Accept\"",
":",
"\"application/json\"",
",",
"\"x-standardize-only\"",
":",
"\"true\"",
"if",
"self",
".",
"standardize",
"else",
"\"false\"",
",",
"\"x-include-invalid\"",
":",
"\"true\"",
"if",
"self",
".",
"invalid",
"else",
"\"false\"",
",",
"\"x-accept-keypair\"",
":",
"\"true\"",
"if",
"self",
".",
"accept_keypair",
"else",
"\"false\"",
",",
"}",
"if",
"not",
"self",
".",
"logging",
":",
"headers",
"[",
"\"x-suppress-logging\"",
"]",
"=",
"\"true\"",
"params",
"=",
"{",
"\"auth-id\"",
":",
"self",
".",
"auth_id",
",",
"\"auth-token\"",
":",
"self",
".",
"auth_token",
"}",
"url",
"=",
"self",
".",
"BASE_URL",
"+",
"endpoint",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"json",
".",
"dumps",
"(",
"stringify",
"(",
"data",
")",
")",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"response",
".",
"json",
"(",
")",
"raise",
"ERROR_CODES",
".",
"get",
"(",
"response",
".",
"status_code",
",",
"SmartyStreetsError",
")"
] | Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content | [
"Executes",
"the",
"HTTP",
"POST",
"request"
] | f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69 | https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L128-L158 | train |
bennylope/smartystreets.py | smartystreets/client.py | Client.street_addresses | def street_addresses(self, addresses):
"""
API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA"], ["6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection
"""
# While it's okay in theory to accept freeform addresses they do need to be submitted in
# a dictionary format.
if type(addresses[0]) != dict:
addresses = [{"street": arg for arg in addresses}]
return AddressCollection(self.post("street-address", data=addresses)) | python | def street_addresses(self, addresses):
"""
API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA"], ["6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection
"""
# While it's okay in theory to accept freeform addresses they do need to be submitted in
# a dictionary format.
if type(addresses[0]) != dict:
addresses = [{"street": arg for arg in addresses}]
return AddressCollection(self.post("street-address", data=addresses)) | [
"def",
"street_addresses",
"(",
"self",
",",
"addresses",
")",
":",
"# While it's okay in theory to accept freeform addresses they do need to be submitted in",
"# a dictionary format.",
"if",
"type",
"(",
"addresses",
"[",
"0",
"]",
")",
"!=",
"dict",
":",
"addresses",
"=",
"[",
"{",
"\"street\"",
":",
"arg",
"for",
"arg",
"in",
"addresses",
"}",
"]",
"return",
"AddressCollection",
"(",
"self",
".",
"post",
"(",
"\"street-address\"",
",",
"data",
"=",
"addresses",
")",
")"
] | API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA"], ["6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection | [
"API",
"method",
"for",
"verifying",
"street",
"address",
"and",
"geolocating"
] | f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69 | https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L162-L184 | train |
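
Note the brace placement in the free-form branch above: `[{"street": arg} for arg in addresses]` (one dict per address) is the intended list comprehension, whereas the near-identical dict comprehension `[{"street": arg for arg in addresses}]` silently keeps only the last address — a bug present in some copies of this function. The difference in plain Python:

```python
addrs = ['100 Main St', '6 S Blvd']

as_dict_comp = [{'street': a for a in addrs}]   # one dict, last value wins
as_list_comp = [{'street': a} for a in addrs]   # one dict per address

print(as_dict_comp)  # [{'street': '6 S Blvd'}]
print(as_list_comp)  # [{'street': '100 Main St'}, {'street': '6 S Blvd'}]
```
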
bennylope/smartystreets.py | smartystreets/client.py | Client.street_address | def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0]) | python | def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0]) | [
"def",
"street_address",
"(",
"self",
",",
"address",
")",
":",
"address",
"=",
"self",
".",
"street_addresses",
"(",
"[",
"address",
"]",
")",
"if",
"not",
"len",
"(",
"address",
")",
":",
"return",
"None",
"return",
"Address",
"(",
"address",
"[",
"0",
"]",
")"
] | Geocode one and only one address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match | [
"Geocode",
"one",
"and",
"only",
"address",
"get",
"a",
"single",
"Address",
"object",
"back"
] | f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69 | https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L186-L200 | train |
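The single-address convenience wrapper, continuing the hypothetical client from the previous sketch:

# Returns an Address or None rather than a collection.
match = client.street_address('100 Main St, Anywhere, USA')
if match is not None:
    print(match)  # inspect the Address fields the API returned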
johnnoone/json-spec | src/jsonspec/validators/__init__.py | load | def load(schema, uri=None, spec=None, provider=None):
"""Scaffold a validator against a schema.
:param schema: the schema to compile into a Validator
:type schema: Mapping
:param uri: the uri of the schema.
it may be ignored in case of not cross
referencing.
:type uri: Pointer, str
:param spec: fallback to this spec if the schema does not provide its own
:type spec: str
:param provider: the other schemas, in case of cross
referencing
:type provider: Mapping, Provider...
"""
factory = Factory(provider, spec)
return factory(schema, uri or '#') | python | def load(schema, uri=None, spec=None, provider=None):
"""Scaffold a validator against a schema.
:param schema: the schema to compile into a Validator
:type schema: Mapping
:param uri: the uri of the schema.
it may be ignored in case of not cross
referencing.
:type uri: Pointer, str
:param spec: fallback to this spec if the schema does not provide its own
:type spec: str
:param provider: the other schemas, in case of cross
referencing
:type provider: Mapping, Provider...
"""
factory = Factory(provider, spec)
return factory(schema, uri or '#') | [
"def",
"load",
"(",
"schema",
",",
"uri",
"=",
"None",
",",
"spec",
"=",
"None",
",",
"provider",
"=",
"None",
")",
":",
"factory",
"=",
"Factory",
"(",
"provider",
",",
"spec",
")",
"return",
"factory",
"(",
"schema",
",",
"uri",
"or",
"'#'",
")"
] | Scaffold a validator against a schema.
:param schema: the schema to compile into a Validator
:type schema: Mapping
:param uri: the uri of the schema.
it may be ignored in case of not cross
referencing.
:type uri: Pointer, str
:param spec: fallback to this spec if the schema does not provide its own
:type spec: str
:param provider: the other schemas, in case of cross
referencing
:type provider: Mapping, Provider... | [
"Scaffold",
"a",
"validator",
"against",
"a",
"schema",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/__init__.py#L21-L37 | train |
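A sketch of driving this factory directly; the schema content is illustrative:

from jsonspec.validators import load

# Compile a schema into a validator, then validate a payload.
validator = load({
    'type': 'object',
    'properties': {'name': {'type': 'string'}},
    'required': ['name'],
})
validator.validate({'name': 'json-spec'})  # raises an error on invalid data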
LISE-B26/pylabcontrol | build/lib/pylabcontrol/gui/windows_and_widgets/widgets.py | MatplotlibWidget.sizeHint | def sizeHint(self):
"""
gives Qt a starting point for widget size during window resizing
"""
w, h = self.get_width_height()
return QtCore.QSize(w, h) | python | def sizeHint(self):
"""
gives Qt a starting point for widget size during window resizing
"""
w, h = self.get_width_height()
return QtCore.QSize(w, h) | [
"def",
"sizeHint",
"(",
"self",
")",
":",
"w",
",",
"h",
"=",
"self",
".",
"get_width_height",
"(",
")",
"return",
"QtCore",
".",
"QSize",
"(",
"w",
",",
"h",
")"
] | gives Qt a starting point for widget size during window resizing | [
"gives",
"qt",
"a",
"starting",
"point",
"for",
"widget",
"size",
"during",
"window",
"resizing"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/widgets.py#L416-L421 | train |
johnnoone/json-spec | src/jsonspec/pointer/stages.py | stage | def stage(obj, parent=None, member=None):
"""
Prepare obj to be staged.
This is mostly used for relative JSON Pointers.
"""
obj = Staged(obj, parent, member)
if isinstance(obj, Mapping):
for key, value in obj.items():
stage(value, obj, key)
elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
for index, value in enumerate(obj):
stage(value, obj, index)
elif isinstance(obj, Set):
for value in obj:
stage(value, obj, None)
return obj | python | def stage(obj, parent=None, member=None):
"""
Prepare obj to be staged.
This is mostly used for relative JSON Pointers.
"""
obj = Staged(obj, parent, member)
if isinstance(obj, Mapping):
for key, value in obj.items():
stage(value, obj, key)
elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
for index, value in enumerate(obj):
stage(value, obj, index)
elif isinstance(obj, Set):
for value in obj:
stage(value, obj, None)
return obj | [
"def",
"stage",
"(",
"obj",
",",
"parent",
"=",
"None",
",",
"member",
"=",
"None",
")",
":",
"obj",
"=",
"Staged",
"(",
"obj",
",",
"parent",
",",
"member",
")",
"if",
"isinstance",
"(",
"obj",
",",
"Mapping",
")",
":",
"for",
"key",
",",
"value",
"in",
"obj",
".",
"items",
"(",
")",
":",
"stage",
"(",
"value",
",",
"obj",
",",
"key",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
":",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"obj",
")",
":",
"stage",
"(",
"value",
",",
"obj",
",",
"index",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Set",
")",
":",
"for",
"value",
"in",
"obj",
":",
"stage",
"(",
"value",
",",
"obj",
",",
"None",
")",
"return",
"obj"
] | Prepare obj to be staged.
This is mostly used for relative JSON Pointers. | [
"Prepare",
"obj",
"to",
"be",
"staged",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/stages.py#L50-L68 | train |
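A small sketch of what staging buys you: once a document is staged, relative JSON Pointers can walk back up from a node. Both helper imports are assumed to resolve from jsonspec.pointer; adjust the import path if stage lives only in the stages submodule.

from jsonspec.pointer import extract, stage

doc = stage({'foo': {'bar': 42}})
node = extract(doc, '/foo')   # descend with an absolute pointer
print(extract(node, '1'))     # relative pointer: one level up -> the whole document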
dirkcuys/s3-backup-rotate | dcu/active_memory/rotate.py | rotate | def rotate(key_prefix, key_ext, bucket_name, daily_backups=7, weekly_backups=4, aws_key=None, aws_secret=None):
""" Delete old files we've uploaded to S3 according to grandfather, father, sun strategy """
session = boto3.Session(
aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret
)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
keys = bucket.objects.filter(Prefix=key_prefix)
regex = '{0}-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?){1}'.format(key_prefix, key_ext)
backups = []
for key in keys:
match = re.match(regex, str(key.key))
if not match:
continue
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
key_date = datetime(year, month, day)
backups[:0] = [key_date]
backups = sorted(backups, reverse=True)
if len(backups) > daily_backups+1 and backups[daily_backups] - backups[daily_backups+1] < timedelta(days=7):
key = bucket.Object("{0}{1}{2}".format(key_prefix,backups[daily_backups].strftime("-%Y-%m-%d"), key_ext))
logger.debug("deleting {0}".format(key))
key.delete()
del backups[daily_backups]
month_offset = daily_backups + weekly_backups
if len(backups) > month_offset+1 and backups[month_offset] - backups[month_offset+1] < timedelta(days=30):
key = bucket.Object("{0}{1}{2}".format(key_prefix,backups[month_offset].strftime("-%Y-%m-%d"), key_ext))
logger.debug("deleting {0}".format(key))
key.delete()
del backups[month_offset] | python | def rotate(key_prefix, key_ext, bucket_name, daily_backups=7, weekly_backups=4, aws_key=None, aws_secret=None):
""" Delete old files we've uploaded to S3 according to grandfather, father, sun strategy """
session = boto3.Session(
aws_access_key_id=aws_key,
aws_secret_access_key=aws_secret
)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
keys = bucket.objects.filter(Prefix=key_prefix)
regex = '{0}-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?){1}'.format(key_prefix, key_ext)
backups = []
for key in keys:
match = re.match(regex, str(key.key))
if not match:
continue
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
key_date = datetime(year, month, day)
backups[:0] = [key_date]
backups = sorted(backups, reverse=True)
if len(backups) > daily_backups+1 and backups[daily_backups] - backups[daily_backups+1] < timedelta(days=7):
key = bucket.Object("{0}{1}{2}".format(key_prefix,backups[daily_backups].strftime("-%Y-%m-%d"), key_ext))
logger.debug("deleting {0}".format(key))
key.delete()
del backups[daily_backups]
month_offset = daily_backups + weekly_backups
if len(backups) > month_offset+1 and backups[month_offset] - backups[month_offset+1] < timedelta(days=30):
key = bucket.Object("{0}{1}{2}".format(key_prefix,backups[month_offset].strftime("-%Y-%m-%d"), key_ext))
logger.debug("deleting {0}".format(key))
key.delete()
del backups[month_offset] | [
"def",
"rotate",
"(",
"key_prefix",
",",
"key_ext",
",",
"bucket_name",
",",
"daily_backups",
"=",
"7",
",",
"weekly_backups",
"=",
"4",
",",
"aws_key",
"=",
"None",
",",
"aws_secret",
"=",
"None",
")",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
"aws_access_key_id",
"=",
"aws_key",
",",
"aws_secret_access_key",
"=",
"aws_secret",
")",
"s3",
"=",
"session",
".",
"resource",
"(",
"'s3'",
")",
"bucket",
"=",
"s3",
".",
"Bucket",
"(",
"bucket_name",
")",
"keys",
"=",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"key_prefix",
")",
"regex",
"=",
"'{0}-(?P<year>[\\d]+?)-(?P<month>[\\d]+?)-(?P<day>[\\d]+?){1}'",
".",
"format",
"(",
"key_prefix",
",",
"key_ext",
")",
"backups",
"=",
"[",
"]",
"for",
"key",
"in",
"keys",
":",
"match",
"=",
"re",
".",
"match",
"(",
"regex",
",",
"str",
"(",
"key",
".",
"key",
")",
")",
"if",
"not",
"match",
":",
"continue",
"year",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'year'",
")",
")",
"month",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'month'",
")",
")",
"day",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'day'",
")",
")",
"key_date",
"=",
"datetime",
"(",
"year",
",",
"month",
",",
"day",
")",
"backups",
"[",
":",
"0",
"]",
"=",
"[",
"key_date",
"]",
"backups",
"=",
"sorted",
"(",
"backups",
",",
"reverse",
"=",
"True",
")",
"if",
"len",
"(",
"backups",
")",
">",
"daily_backups",
"+",
"1",
"and",
"backups",
"[",
"daily_backups",
"]",
"-",
"backups",
"[",
"daily_backups",
"+",
"1",
"]",
"<",
"timedelta",
"(",
"days",
"=",
"7",
")",
":",
"key",
"=",
"bucket",
".",
"Object",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"key_prefix",
",",
"backups",
"[",
"daily_backups",
"]",
".",
"strftime",
"(",
"\"-%Y-%m-%d\"",
")",
",",
"key_ext",
")",
")",
"logger",
".",
"debug",
"(",
"\"deleting {0}\"",
".",
"format",
"(",
"key",
")",
")",
"key",
".",
"delete",
"(",
")",
"del",
"backups",
"[",
"daily_backups",
"]",
"month_offset",
"=",
"daily_backups",
"+",
"weekly_backups",
"if",
"len",
"(",
"backups",
")",
">",
"month_offset",
"+",
"1",
"and",
"backups",
"[",
"month_offset",
"]",
"-",
"backups",
"[",
"month_offset",
"+",
"1",
"]",
"<",
"timedelta",
"(",
"days",
"=",
"30",
")",
":",
"key",
"=",
"bucket",
".",
"Object",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"key_prefix",
",",
"backups",
"[",
"month_offset",
"]",
".",
"strftime",
"(",
"\"-%Y-%m-%d\"",
")",
",",
"key_ext",
")",
")",
"logger",
".",
"debug",
"(",
"\"deleting {0}\"",
".",
"format",
"(",
"key",
")",
")",
"key",
".",
"delete",
"(",
")",
"del",
"backups",
"[",
"month_offset",
"]"
] | Delete old files we've uploaded to S3 according to grandfather, father, son strategy | [
"Delete",
"old",
"files",
"we",
"ve",
"uploaded",
"to",
"S3",
"according",
"to",
"grandfather",
"father",
"sun",
"strategy"
] | ab226c7b636550823a9c91e3ebd81776d255f204 | https://github.com/dirkcuys/s3-backup-rotate/blob/ab226c7b636550823a9c91e3ebd81776d255f204/dcu/active_memory/rotate.py#L10-L46 | train |
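A sketch of a nightly invocation of the rotation above; the bucket name and credentials are placeholders:

# Keep 7 dailies and 4 weeklies of backup-YYYY-MM-DD.tar.bz2 objects, thinning
# older backups to weekly and then monthly granularity.
rotate(key_prefix='backup',
       key_ext='.tar.bz2',
       bucket_name='my-backup-bucket',  # placeholder
       daily_backups=7,
       weekly_backups=4,
       aws_key='AKIA...',               # placeholder; instance credentials also work
       aws_secret='...')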
dirkcuys/s3-backup-rotate | dcu/active_memory/rotate.py | splitext | def splitext( filename ):
""" Return the filename and extension according to the first dot in the filename.
This helps date stamping .tar.bz2 or .ext.gz files properly.
"""
index = filename.find('.')
if index == 0:
index = 1+filename[1:].find('.')
if index <= 0:
return filename, ''
return filename[:index], filename[index:] | python | def splitext( filename ):
""" Return the filename and extension according to the first dot in the filename.
This helps date stamping .tar.bz2 or .ext.gz files properly.
"""
index = filename.find('.')
if index == 0:
index = 1+filename[1:].find('.')
if index <= 0:
return filename, ''
return filename[:index], filename[index:] | [
"def",
"splitext",
"(",
"filename",
")",
":",
"index",
"=",
"filename",
".",
"find",
"(",
"'.'",
")",
"if",
"index",
"==",
"0",
":",
"index",
"=",
"1",
"+",
"filename",
"[",
"1",
":",
"]",
".",
"find",
"(",
"'.'",
")",
"if",
"index",
"==",
"-",
"1",
":",
"return",
"filename",
",",
"''",
"return",
"filename",
"[",
":",
"index",
"]",
",",
"filename",
"[",
"index",
":",
"]",
"return",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")"
] | Return the filename and extension according to the first dot in the filename.
This helps date stamping .tar.bz2 or .ext.gz files properly. | [
"Return",
"the",
"filename",
"and",
"extension",
"according",
"to",
"the",
"first",
"dot",
"in",
"the",
"filename",
".",
"This",
"helps",
"date",
"stamping",
".",
"tar",
".",
"bz2",
"or",
".",
"ext",
".",
"gz",
"files",
"properly",
"."
] | ab226c7b636550823a9c91e3ebd81776d255f204 | https://github.com/dirkcuys/s3-backup-rotate/blob/ab226c7b636550823a9c91e3ebd81776d255f204/dcu/active_memory/rotate.py#L49-L59 | train |
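With the first-dot split the docstring describes, compound extensions survive date stamping:

name, ext = splitext('backup.tar.bz2')
print(name, ext)            # 'backup' '.tar.bz2' -> the date stamp fits between the two
print(splitext('.bashrc'))  # ('.bashrc', '') -> a leading dot is not an extension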
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/gui/manual_fitting.py | FittingWindow.start_fitting | def start_fitting(self):
"""
Launches the fitting routine on another thread
"""
self.queue = queue.Queue()
self.peak_vals = []
self.fit_thread = QThread() #must be assigned as an instance variable, not local, as otherwise thread is garbage
#collected immediately at the end of the function before it runs
self.fitobj = self.do_fit(str(self.data_filepath.text()), self.matplotlibwidget, self.queue, self.peak_vals, self.peak_locs)
self.fitobj.moveToThread(self.fit_thread)
self.fit_thread.started.connect(self.fitobj.run)
self.fitobj.finished.connect(self.fit_thread.quit) # clean up. quit thread after script is finished
self.fitobj.status.connect(self.update_status)
self.fit_thread.start() | python | def start_fitting(self):
"""
Launches the fitting routine on another thread
"""
self.queue = queue.Queue()
self.peak_vals = []
self.fit_thread = QThread() #must be assigned as an instance variable, not local, as otherwise thread is garbage
#collected immediately at the end of the function before it runs
self.fitobj = self.do_fit(str(self.data_filepath.text()), self.matplotlibwidget, self.queue, self.peak_vals, self.peak_locs)
self.fitobj.moveToThread(self.fit_thread)
self.fit_thread.started.connect(self.fitobj.run)
self.fitobj.finished.connect(self.fit_thread.quit) # clean up. quit thread after script is finished
self.fitobj.status.connect(self.update_status)
self.fit_thread.start() | [
"def",
"start_fitting",
"(",
"self",
")",
":",
"self",
".",
"queue",
"=",
"queue",
".",
"Queue",
"(",
")",
"self",
".",
"peak_vals",
"=",
"[",
"]",
"self",
".",
"fit_thread",
"=",
"QThread",
"(",
")",
"#must be assigned as an instance variable, not local, as otherwise thread is garbage",
"#collected immediately at the end of the function before it runs",
"self",
".",
"fitobj",
"=",
"self",
".",
"do_fit",
"(",
"str",
"(",
"self",
".",
"data_filepath",
".",
"text",
"(",
")",
")",
",",
"self",
".",
"matplotlibwidget",
",",
"self",
".",
"queue",
",",
"self",
".",
"peak_vals",
",",
"self",
".",
"peak_locs",
")",
"self",
".",
"fitobj",
".",
"moveToThread",
"(",
"self",
".",
"fit_thread",
")",
"self",
".",
"fit_thread",
".",
"started",
".",
"connect",
"(",
"self",
".",
"fitobj",
".",
"run",
")",
"self",
".",
"fitobj",
".",
"finished",
".",
"connect",
"(",
"self",
".",
"fit_thread",
".",
"quit",
")",
"# clean up. quit thread after script is finished",
"self",
".",
"fitobj",
".",
"status",
".",
"connect",
"(",
"self",
".",
"update_status",
")",
"self",
".",
"fit_thread",
".",
"start",
"(",
")"
] | Launches the fitting routine on another thread | [
"Launches",
"the",
"fitting",
"routine",
"on",
"another",
"thread"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/manual_fitting.py#L326-L339 | train |
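The QThread pattern above in isolation: the worker object is moved to a thread that is kept as an instance attribute so it is not garbage collected mid-run. PyQt5 is assumed, and the class names are illustrative.

from PyQt5.QtCore import QObject, QThread, pyqtSignal

class Worker(QObject):
    finished = pyqtSignal()

    def run(self):
        # long-running work goes here
        self.finished.emit()

class Owner(QObject):
    def start(self):
        self.thread = QThread()   # instance attribute, not a local variable
        self.worker = Worker()
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.run)
        self.worker.finished.connect(self.thread.quit)  # clean up when done
        self.thread.start()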
CitrineInformatics/pif-dft | dfttopif/parsers/vasp.py | VaspParser._get_bandgap_from_bands | def _get_bandgap_from_bands(energies, nelec):
"""Compute difference in conduction band min and valence band max"""
nelec = int(nelec)
valence = [x[nelec-1] for x in energies]
conduction = [x[nelec] for x in energies]
return max(min(conduction) - max(valence), 0.0) | python | def _get_bandgap_from_bands(energies, nelec):
"""Compute difference in conduction band min and valence band max"""
nelec = int(nelec)
valence = [x[nelec-1] for x in energies]
conduction = [x[nelec] for x in energies]
return max(min(conduction) - max(valence), 0.0) | [
"def",
"_get_bandgap_from_bands",
"(",
"energies",
",",
"nelec",
")",
":",
"nelec",
"=",
"int",
"(",
"nelec",
")",
"valence",
"=",
"[",
"x",
"[",
"nelec",
"-",
"1",
"]",
"for",
"x",
"in",
"energies",
"]",
"conduction",
"=",
"[",
"x",
"[",
"nelec",
"]",
"for",
"x",
"in",
"energies",
"]",
"return",
"max",
"(",
"min",
"(",
"conduction",
")",
"-",
"max",
"(",
"valence",
")",
",",
"0.0",
")"
] | Compute difference in conduction band min and valence band max | [
"Compute",
"difference",
"in",
"conduction",
"band",
"min",
"and",
"valence",
"band",
"max"
] | d5411dc1f6c6e8d454b132977ca7ab3bb8131a80 | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/vasp.py#L331-L336 | train |
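A tiny worked example for the band arithmetic. The numbers are invented, and nelec here counts filled bands (the EIGENVAL caller passes the electron count divided by two):

from dfttopif.parsers.vasp import VaspParser

# Each inner list holds band energies (eV) at one k-point, sorted ascending.
energies = [[-2.0, 1.5, 3.0],   # k-point 1
            [-1.8, 1.2, 2.9]]   # k-point 2
gap = VaspParser._get_bandgap_from_bands(energies, 1)
print(gap)  # min(conduction) - max(valence) = 1.2 - (-1.8) = 3.0 eV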
CitrineInformatics/pif-dft | dfttopif/parsers/vasp.py | VaspParser._get_bandgap_eigenval | def _get_bandgap_eigenval(eigenval_fname, outcar_fname):
"""Get the bandgap from the EIGENVAL file"""
with open(outcar_fname, "r") as f:
parser = OutcarParser()
nelec = next(iter(filter(lambda x: "number of electrons" in x, parser.parse(f.readlines()))))["number of electrons"]
with open(eigenval_fname, "r") as f:
eigenval_info = list(EigenvalParser().parse(f.readlines()))
# spin_polarized = (2 == len(next(filter(lambda x: "kpoint" in x, eigenval_info))["occupancies"][0]))
# if spin_polarized:
all_energies = [zip(*x["energies"]) for x in eigenval_info if "energies" in x]
spin_energies = zip(*all_energies)
gaps = [VaspParser._get_bandgap_from_bands(x, nelec/2.0) for x in spin_energies]
return min(gaps) | python | def _get_bandgap_eigenval(eigenval_fname, outcar_fname):
"""Get the bandgap from the EIGENVAL file"""
with open(outcar_fname, "r") as f:
parser = OutcarParser()
nelec = next(iter(filter(lambda x: "number of electrons" in x, parser.parse(f.readlines()))))["number of electrons"]
with open(eigenval_fname, "r") as f:
eigenval_info = list(EigenvalParser().parse(f.readlines()))
# spin_polarized = (2 == len(next(filter(lambda x: "kpoint" in x, eigenval_info))["occupancies"][0]))
# if spin_polarized:
all_energies = [zip(*x["energies"]) for x in eigenval_info if "energies" in x]
spin_energies = zip(*all_energies)
gaps = [VaspParser._get_bandgap_from_bands(x, nelec/2.0) for x in spin_energies]
return min(gaps) | [
"def",
"_get_bandgap_eigenval",
"(",
"eigenval_fname",
",",
"outcar_fname",
")",
":",
"with",
"open",
"(",
"outcar_fname",
",",
"\"r\"",
")",
"as",
"f",
":",
"parser",
"=",
"OutcarParser",
"(",
")",
"nelec",
"=",
"next",
"(",
"iter",
"(",
"filter",
"(",
"lambda",
"x",
":",
"\"number of electrons\"",
"in",
"x",
",",
"parser",
".",
"parse",
"(",
"f",
".",
"readlines",
"(",
")",
")",
")",
")",
")",
"[",
"\"number of electrons\"",
"]",
"with",
"open",
"(",
"eigenval_fname",
",",
"\"r\"",
")",
"as",
"f",
":",
"eigenval_info",
"=",
"list",
"(",
"EigenvalParser",
"(",
")",
".",
"parse",
"(",
"f",
".",
"readlines",
"(",
")",
")",
")",
"# spin_polarized = (2 == len(next(filter(lambda x: \"kpoint\" in x, eigenval_info))[\"occupancies\"][0]))",
"# if spin_polarized:",
"all_energies",
"=",
"[",
"zip",
"(",
"*",
"x",
"[",
"\"energies\"",
"]",
")",
"for",
"x",
"in",
"eigenval_info",
"if",
"\"energies\"",
"in",
"x",
"]",
"spin_energies",
"=",
"zip",
"(",
"*",
"all_energies",
")",
"gaps",
"=",
"[",
"VaspParser",
".",
"_get_bandgap_from_bands",
"(",
"x",
",",
"nelec",
"/",
"2.0",
")",
"for",
"x",
"in",
"spin_energies",
"]",
"return",
"min",
"(",
"gaps",
")"
] | Get the bandgap from the EIGENVAL file | [
"Get",
"the",
"bandgap",
"from",
"the",
"EIGENVAL",
"file"
] | d5411dc1f6c6e8d454b132977ca7ab3bb8131a80 | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/vasp.py#L339-L351 | train |
CitrineInformatics/pif-dft | dfttopif/parsers/vasp.py | VaspParser._get_bandgap_doscar | def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap | python | def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap | [
"def",
"_get_bandgap_doscar",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"for",
"i",
"in",
"range",
"(",
"6",
")",
":",
"l",
"=",
"fp",
".",
"readline",
"(",
")",
"efermi",
"=",
"float",
"(",
"l",
".",
"split",
"(",
")",
"[",
"3",
"]",
")",
"step1",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"step2",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"step_size",
"=",
"float",
"(",
"step2",
")",
"-",
"float",
"(",
"step1",
")",
"not_found",
"=",
"True",
"while",
"not_found",
":",
"l",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"e",
"=",
"float",
"(",
"l",
".",
"pop",
"(",
"0",
")",
")",
"dens",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"int",
"(",
"len",
"(",
"l",
")",
"/",
"2",
")",
")",
":",
"dens",
"+=",
"float",
"(",
"l",
"[",
"i",
"]",
")",
"if",
"e",
"<",
"efermi",
"and",
"dens",
">",
"1e-3",
":",
"bot",
"=",
"e",
"elif",
"e",
">",
"efermi",
"and",
"dens",
">",
"1e-3",
":",
"top",
"=",
"e",
"not_found",
"=",
"False",
"if",
"top",
"-",
"bot",
"<",
"step_size",
"*",
"2",
":",
"bandgap",
"=",
"0.0",
"else",
":",
"bandgap",
"=",
"float",
"(",
"top",
"-",
"bot",
")",
"return",
"bandgap"
] | Get the bandgap from the DOSCAR file | [
"Get",
"the",
"bandgap",
"from",
"the",
"DOSCAR",
"file"
] | d5411dc1f6c6e8d454b132977ca7ab3bb8131a80 | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/vasp.py#L354-L380 | train |
CitrineInformatics/pif-dft | dfttopif/parsers/vasp.py | VaspParser.get_band_gap | def get_band_gap(self):
"""Get the bandgap, either from the EIGENVAL or DOSCAR files"""
if self.outcar is not None and self.eignval is not None:
bandgap = VaspParser._get_bandgap_eigenval(self.eignval, self.outcar)
elif self.doscar is not None:
bandgap = VaspParser._get_bandgap_doscar(self.doscar)
else:
return None
return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV') | python | def get_band_gap(self):
"""Get the bandgap, either from the EIGENVAL or DOSCAR files"""
if self.outcar is not None and self.eignval is not None:
bandgap = VaspParser._get_bandgap_eigenval(self.eignval, self.outcar)
elif self.doscar is not None:
bandgap = VaspParser._get_bandgap_doscar(self.doscar)
else:
return None
return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV') | [
"def",
"get_band_gap",
"(",
"self",
")",
":",
"if",
"self",
".",
"outcar",
"is",
"not",
"None",
"and",
"self",
".",
"eignval",
"is",
"not",
"None",
":",
"bandgap",
"=",
"VaspParser",
".",
"_get_bandgap_eigenval",
"(",
"self",
".",
"eignval",
",",
"self",
".",
"outcar",
")",
"elif",
"self",
".",
"doscar",
"is",
"not",
"None",
":",
"bandgap",
"=",
"VaspParser",
".",
"_get_bandgap_doscar",
"(",
"self",
".",
"doscar",
")",
"else",
":",
"return",
"None",
"return",
"Property",
"(",
"scalars",
"=",
"[",
"Scalar",
"(",
"value",
"=",
"round",
"(",
"bandgap",
",",
"3",
")",
")",
"]",
",",
"units",
"=",
"'eV'",
")"
] | Get the bandgap, either from the EIGENVAL or DOSCAR files | [
"Get",
"the",
"bandgap",
"either",
"from",
"the",
"EIGENVAL",
"or",
"DOSCAR",
"files"
] | d5411dc1f6c6e8d454b132977ca7ab3bb8131a80 | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/vasp.py#L382-L390 | train |
lreis2415/PyGeoC | pygeoc/raster.py | Raster.get_value_by_xy | def get_value_by_xy(self, x, y):
"""Get raster value by xy coordinates.
Args:
x: X Coordinate.
y: Y Coordinate.
Returns:
raster value, None if the input are invalid.
"""
if x < self.xMin or x > self.xMax or y < self.yMin or y > self.yMax:
return None
# raise ValueError("The x or y value must be within the Min and Max!")
else:
row = self.nRows - int(numpy.ceil((y - self.yMin) / self.dx))
col = int(numpy.floor((x - self.xMin) / self.dx))
value = self.data[row][col]
if value == self.noDataValue:
return None
else:
return value | python | def get_value_by_xy(self, x, y):
"""Get raster value by xy coordinates.
Args:
x: X Coordinate.
y: Y Coordinate.
Returns:
raster value, None if the inputs are invalid.
"""
if x < self.xMin or x > self.xMax or y < self.yMin or y > self.yMax:
return None
# raise ValueError("The x or y value must be within the Min and Max!")
else:
row = self.nRows - int(numpy.ceil((y - self.yMin) / self.dx))
col = int(numpy.floor((x - self.xMin) / self.dx))
value = self.data[row][col]
if value == self.noDataValue:
return None
else:
return value | [
"def",
"get_value_by_xy",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"if",
"x",
"<",
"self",
".",
"xMin",
"or",
"x",
">",
"self",
".",
"xMax",
"or",
"y",
"<",
"self",
".",
"yMin",
"or",
"y",
">",
"self",
".",
"yMax",
":",
"return",
"None",
"# raise ValueError(\"The x or y value must be within the Min and Max!\")",
"else",
":",
"row",
"=",
"self",
".",
"nRows",
"-",
"int",
"(",
"numpy",
".",
"ceil",
"(",
"(",
"y",
"-",
"self",
".",
"yMin",
")",
"/",
"self",
".",
"dx",
")",
")",
"col",
"=",
"int",
"(",
"numpy",
".",
"floor",
"(",
"(",
"x",
"-",
"self",
".",
"xMin",
")",
"/",
"self",
".",
"dx",
")",
")",
"value",
"=",
"self",
".",
"data",
"[",
"row",
"]",
"[",
"col",
"]",
"if",
"value",
"==",
"self",
".",
"noDataValue",
":",
"return",
"None",
"else",
":",
"return",
"value"
] | Get raster value by xy coordinates.
Args:
x: X Coordinate.
y: Y Coordinate.
Returns:
raster value, None if the inputs are invalid. | [
"Get",
"raster",
"value",
"by",
"xy",
"coordinates",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L196-L216 | train |
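The row/col arithmetic above in miniature, with invented grid parameters:

import math

# Suppose xMin=0, yMin=0, yMax=30, dx=10, nRows=3, nCols=3; query point (x=15, y=25).
row = 3 - int(math.ceil((25 - 0) / 10.0))  # nRows - ceil((y - yMin) / dx) = 0 (top row)
col = int(math.floor((15 - 0) / 10.0))     # floor((x - xMin) / dx) = 1 (middle column)
print(row, col)  # 0 1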
lreis2415/PyGeoC | pygeoc/raster.py | Raster.get_central_coors | def get_central_coors(self, row, col):
"""Get the coordinates of central grid.
Args:
row: row number, range from 0 to (nRows - 1).
col: col number, range from 0 to (nCols - 1).
Returns:
XY coordinates. If the row or col is invalid, raise ValueError.
"""
if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:
raise ValueError("The row (%d) or col (%d) must be >=0 and less than "
"nRows (%d) or nCols (%d)!" % (row, col, self.nRows, self.nCols))
else:
tmpx = self.xMin + (col + 0.5) * self.dx
tmpy = self.yMax - (row + 0.5) * self.dx
return tmpx, tmpy | python | def get_central_coors(self, row, col):
"""Get the coordinates of central grid.
Args:
row: row number, range from 0 to (nRows - 1).
col: col number, range from 0 to (nCols - 1).
Returns:
XY coordinates. If the row or col is invalid, raise ValueError.
"""
if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:
raise ValueError("The row (%d) or col (%d) must be >=0 and less than "
"nRows (%d) or nCols (%d)!" % (row, col, self.nRows, self.nCols))
else:
tmpx = self.xMin + (col + 0.5) * self.dx
tmpy = self.yMax - (row + 0.5) * self.dx
return tmpx, tmpy | [
"def",
"get_central_coors",
"(",
"self",
",",
"row",
",",
"col",
")",
":",
"if",
"row",
"<",
"0",
"or",
"row",
">=",
"self",
".",
"nRows",
"or",
"col",
"<",
"0",
"or",
"col",
">=",
"self",
".",
"nCols",
":",
"raise",
"ValueError",
"(",
"\"The row (%d) or col (%d) must be >=0 and less than \"",
"\"nRows (%d) or nCols (%d)!\"",
"%",
"(",
"row",
",",
"col",
",",
"self",
".",
"nRows",
",",
"self",
".",
"nCols",
")",
")",
"else",
":",
"tmpx",
"=",
"self",
".",
"xMin",
"+",
"(",
"col",
"+",
"0.5",
")",
"*",
"self",
".",
"dx",
"tmpy",
"=",
"self",
".",
"yMax",
"-",
"(",
"row",
"+",
"0.5",
")",
"*",
"self",
".",
"dx",
"return",
"tmpx",
",",
"tmpy"
] | Get the coordinates of central grid.
Args:
row: row number, range from 0 to (nRows - 1).
col: col number, range from 0 to (nCols - 1).
Returns:
XY coordinates. If the row or col is invalid, raise ValueError. | [
"Get",
"the",
"coordinates",
"of",
"central",
"grid",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L218-L234 | train |
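And the inverse direction: cell indices back to cell-center coordinates on the same invented grid:

# xMin=0, yMax=30, dx=10; center of the cell at row=0, col=1:
x = 0 + (1 + 0.5) * 10    # 15.0
y = 30 - (0 + 0.5) * 10   # 25.0
print(x, y)  # round-trips with the (row=0, col=1) result from the previous sketch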
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.read_raster | def read_raster(raster_file):
"""Read raster by GDAL.
Args:
raster_file: raster file path.
Returns:
Raster object.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
data = band.ReadAsArray()
xsize = band.XSize
ysize = band.YSize
nodata_value = band.GetNoDataValue()
geotrans = ds.GetGeoTransform()
dttype = band.DataType
srs = osr_SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
# print(srs.ExportToProj4())
if nodata_value is None:
nodata_value = DEFAULT_NODATA
band = None
ds = None
return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype) | python | def read_raster(raster_file):
"""Read raster by GDAL.
Args:
raster_file: raster file path.
Returns:
Raster object.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
data = band.ReadAsArray()
xsize = band.XSize
ysize = band.YSize
nodata_value = band.GetNoDataValue()
geotrans = ds.GetGeoTransform()
dttype = band.DataType
srs = osr_SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
# print(srs.ExportToProj4())
if nodata_value is None:
nodata_value = DEFAULT_NODATA
band = None
ds = None
return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype) | [
"def",
"read_raster",
"(",
"raster_file",
")",
":",
"ds",
"=",
"gdal_Open",
"(",
"raster_file",
")",
"band",
"=",
"ds",
".",
"GetRasterBand",
"(",
"1",
")",
"data",
"=",
"band",
".",
"ReadAsArray",
"(",
")",
"xsize",
"=",
"band",
".",
"XSize",
"ysize",
"=",
"band",
".",
"YSize",
"nodata_value",
"=",
"band",
".",
"GetNoDataValue",
"(",
")",
"geotrans",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"dttype",
"=",
"band",
".",
"DataType",
"srs",
"=",
"osr_SpatialReference",
"(",
")",
"srs",
".",
"ImportFromWkt",
"(",
"ds",
".",
"GetProjection",
"(",
")",
")",
"# print(srs.ExportToProj4())",
"if",
"nodata_value",
"is",
"None",
":",
"nodata_value",
"=",
"DEFAULT_NODATA",
"band",
"=",
"None",
"ds",
"=",
"None",
"return",
"Raster",
"(",
"ysize",
",",
"xsize",
",",
"data",
",",
"nodata_value",
",",
"geotrans",
",",
"srs",
",",
"dttype",
")"
] | Read raster by GDAL.
Args:
raster_file: raster file path.
Returns:
Raster object. | [
"Read",
"raster",
"by",
"GDAL",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L249-L275 | train |
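Typical use of the reader; the file path is a placeholder:

from pygeoc.raster import RasterUtilClass

dem = RasterUtilClass.read_raster('dem.tif')  # placeholder path
print(dem.nRows, dem.nCols, dem.noDataValue)
# Value of the top-left cell, via the xy lookup documented earlier:
print(dem.get_value_by_xy(dem.xMin + 0.5 * dem.dx, dem.yMax - 0.5 * dem.dx))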
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.get_mask_from_raster | def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):
"""Generate mask data from a given raster data.
Args:
rasterfile: raster file path.
outmaskfile: output mask file path.
keep_nodata: if True, keep the full original extent (including nodata cells) instead of cropping; False as default.
Returns:
Raster object of mask data.
"""
raster_r = RasterUtilClass.read_raster(rasterfile)
xsize = raster_r.nCols
ysize = raster_r.nRows
nodata_value = raster_r.noDataValue
srs = raster_r.srs
x_min = raster_r.xMin
y_max = raster_r.yMax
dx = raster_r.dx
data = raster_r.data
if not keep_nodata:
i_min = ysize - 1
i_max = 0
j_min = xsize - 1
j_max = 0
for i in range(ysize):
for j in range(xsize):
if abs(data[i][j] - nodata_value) > DELTA:
i_min = min(i, i_min)
i_max = max(i, i_max)
j_min = min(j, j_min)
j_max = max(j, j_max)
# print(i_min, i_max, j_min, j_max)
y_size_mask = i_max - i_min + 1
x_size_mask = j_max - j_min + 1
x_min_mask = x_min + j_min * dx
y_max_mask = y_max - i_min * dx
else:
y_size_mask = ysize
x_size_mask = xsize
x_min_mask = x_min
y_max_mask = y_max
i_min = 0
j_min = 0
print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))
mask = numpy.zeros((y_size_mask, x_size_mask))
for i in range(y_size_mask):
for j in range(x_size_mask):
if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:
mask[i][j] = 1
else:
mask[i][j] = DEFAULT_NODATA
mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]
RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask,
mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)
return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs) | python | def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):
"""Generate mask data from a given raster data.
Args:
rasterfile: raster file path.
outmaskfile: output mask file path.
keep_nodata: if True, keep the full original extent (including nodata cells) instead of cropping; False as default.
Returns:
Raster object of mask data.
"""
raster_r = RasterUtilClass.read_raster(rasterfile)
xsize = raster_r.nCols
ysize = raster_r.nRows
nodata_value = raster_r.noDataValue
srs = raster_r.srs
x_min = raster_r.xMin
y_max = raster_r.yMax
dx = raster_r.dx
data = raster_r.data
if not keep_nodata:
i_min = ysize - 1
i_max = 0
j_min = xsize - 1
j_max = 0
for i in range(ysize):
for j in range(xsize):
if abs(data[i][j] - nodata_value) > DELTA:
i_min = min(i, i_min)
i_max = max(i, i_max)
j_min = min(j, j_min)
j_max = max(j, j_max)
# print(i_min, i_max, j_min, j_max)
y_size_mask = i_max - i_min + 1
x_size_mask = j_max - j_min + 1
x_min_mask = x_min + j_min * dx
y_max_mask = y_max - i_min * dx
else:
y_size_mask = ysize
x_size_mask = xsize
x_min_mask = x_min
y_max_mask = y_max
i_min = 0
j_min = 0
print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))
mask = numpy.zeros((y_size_mask, x_size_mask))
for i in range(y_size_mask):
for j in range(x_size_mask):
if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:
mask[i][j] = 1
else:
mask[i][j] = DEFAULT_NODATA
mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]
RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask,
mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)
return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs) | [
"def",
"get_mask_from_raster",
"(",
"rasterfile",
",",
"outmaskfile",
",",
"keep_nodata",
"=",
"False",
")",
":",
"raster_r",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"rasterfile",
")",
"xsize",
"=",
"raster_r",
".",
"nCols",
"ysize",
"=",
"raster_r",
".",
"nRows",
"nodata_value",
"=",
"raster_r",
".",
"noDataValue",
"srs",
"=",
"raster_r",
".",
"srs",
"x_min",
"=",
"raster_r",
".",
"xMin",
"y_max",
"=",
"raster_r",
".",
"yMax",
"dx",
"=",
"raster_r",
".",
"dx",
"data",
"=",
"raster_r",
".",
"data",
"if",
"not",
"keep_nodata",
":",
"i_min",
"=",
"ysize",
"-",
"1",
"i_max",
"=",
"0",
"j_min",
"=",
"xsize",
"-",
"1",
"j_max",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"ysize",
")",
":",
"for",
"j",
"in",
"range",
"(",
"xsize",
")",
":",
"if",
"abs",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"-",
"nodata_value",
")",
">",
"DELTA",
":",
"i_min",
"=",
"min",
"(",
"i",
",",
"i_min",
")",
"i_max",
"=",
"max",
"(",
"i",
",",
"i_max",
")",
"j_min",
"=",
"min",
"(",
"j",
",",
"j_min",
")",
"j_max",
"=",
"max",
"(",
"j",
",",
"j_max",
")",
"# print(i_min, i_max, j_min, j_max)",
"y_size_mask",
"=",
"i_max",
"-",
"i_min",
"+",
"1",
"x_size_mask",
"=",
"j_max",
"-",
"j_min",
"+",
"1",
"x_min_mask",
"=",
"x_min",
"+",
"j_min",
"*",
"dx",
"y_max_mask",
"=",
"y_max",
"-",
"i_min",
"*",
"dx",
"else",
":",
"y_size_mask",
"=",
"ysize",
"x_size_mask",
"=",
"xsize",
"x_min_mask",
"=",
"x_min",
"y_max_mask",
"=",
"y_max",
"i_min",
"=",
"0",
"j_min",
"=",
"0",
"print",
"(",
"'%dx%d -> %dx%d'",
"%",
"(",
"xsize",
",",
"ysize",
",",
"x_size_mask",
",",
"y_size_mask",
")",
")",
"mask",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"y_size_mask",
",",
"x_size_mask",
")",
")",
"for",
"i",
"in",
"range",
"(",
"y_size_mask",
")",
":",
"for",
"j",
"in",
"range",
"(",
"x_size_mask",
")",
":",
"if",
"abs",
"(",
"data",
"[",
"i",
"+",
"i_min",
"]",
"[",
"j",
"+",
"j_min",
"]",
"-",
"nodata_value",
")",
">",
"DELTA",
":",
"mask",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"1",
"else",
":",
"mask",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"DEFAULT_NODATA",
"mask_geotrans",
"=",
"[",
"x_min_mask",
",",
"dx",
",",
"0",
",",
"y_max_mask",
",",
"0",
",",
"-",
"dx",
"]",
"RasterUtilClass",
".",
"write_gtiff_file",
"(",
"outmaskfile",
",",
"y_size_mask",
",",
"x_size_mask",
",",
"mask",
",",
"mask_geotrans",
",",
"srs",
",",
"DEFAULT_NODATA",
",",
"GDT_Int32",
")",
"return",
"Raster",
"(",
"y_size_mask",
",",
"x_size_mask",
",",
"mask",
",",
"DEFAULT_NODATA",
",",
"mask_geotrans",
",",
"srs",
")"
] | Generate mask data from a given raster data.
Args:
rasterfile: raster file path.
outmaskfile: output mask file path.
keep_nodata: if True, keep the full original extent (including nodata cells) instead of cropping; False as default.
Returns:
Raster object of mask data. | [
"Generate",
"mask",
"data",
"from",
"a",
"given",
"raster",
"data",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L278-L337 | train |
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_reclassify | def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
"""Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.
"""
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
src_r.geotrans, src_r.srs, no_data, gdaltype) | python | def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
"""Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.
"""
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
src_r.geotrans, src_r.srs, no_data, gdaltype) | [
"def",
"raster_reclassify",
"(",
"srcfile",
",",
"v_dict",
",",
"dstfile",
",",
"gdaltype",
"=",
"GDT_Float32",
")",
":",
"src_r",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"srcfile",
")",
"src_data",
"=",
"src_r",
".",
"data",
"dst_data",
"=",
"numpy",
".",
"copy",
"(",
"src_data",
")",
"if",
"gdaltype",
"==",
"GDT_Float32",
"and",
"src_r",
".",
"dataType",
"!=",
"GDT_Float32",
":",
"gdaltype",
"=",
"src_r",
".",
"dataType",
"no_data",
"=",
"src_r",
".",
"noDataValue",
"new_no_data",
"=",
"DEFAULT_NODATA",
"if",
"gdaltype",
"in",
"[",
"GDT_Unknown",
",",
"GDT_Byte",
",",
"GDT_UInt16",
",",
"GDT_UInt32",
"]",
":",
"new_no_data",
"=",
"0",
"if",
"not",
"MathClass",
".",
"floatequal",
"(",
"new_no_data",
",",
"src_r",
".",
"noDataValue",
")",
":",
"if",
"src_r",
".",
"noDataValue",
"not",
"in",
"v_dict",
":",
"v_dict",
"[",
"src_r",
".",
"noDataValue",
"]",
"=",
"new_no_data",
"no_data",
"=",
"new_no_data",
"for",
"(",
"k",
",",
"v",
")",
"in",
"iteritems",
"(",
"v_dict",
")",
":",
"dst_data",
"[",
"src_data",
"==",
"k",
"]",
"=",
"v",
"RasterUtilClass",
".",
"write_gtiff_file",
"(",
"dstfile",
",",
"src_r",
".",
"nRows",
",",
"src_r",
".",
"nCols",
",",
"dst_data",
",",
"src_r",
".",
"geotrans",
",",
"src_r",
".",
"srs",
",",
"no_data",
",",
"gdaltype",
")"
] | Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default. | [
"Reclassify",
"raster",
"by",
"given",
"classifier",
"dict",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L340-L366 | train |
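A reclassification sketch, collapsing invented landuse codes into two classes; the input and output paths are placeholders:

from pygeoc.raster import RasterUtilClass

reclass_map = {11: 1, 12: 1, 21: 2, 22: 2}  # old value -> new value
RasterUtilClass.raster_reclassify('landuse.tif', reclass_map, 'landuse_2cls.tif')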
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.write_gtiff_file | def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,
gdal_type=GDT_Float32):
"""Output Raster to GeoTiff format file.
Args:
f_name: output gtiff file name.
n_rows: Row count.
n_cols: Col count.
data: 2D array data.
geotransform: geographic transformation.
srs: coordinate system.
nodata_value: nodata value.
gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
GDT_Float32 as default.
"""
UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
driver = gdal_GetDriverByName(str('GTiff'))
try:
ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
except Exception:
print('Cannot create output file %s' % f_name)
return
ds.SetGeoTransform(geotransform)
try:
ds.SetProjection(srs.ExportToWkt())
except (AttributeError, Exception):
ds.SetProjection(srs)
ds.GetRasterBand(1).SetNoDataValue(nodata_value)
# if data contains numpy.nan, then replaced by nodata_value
if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
numpy.dtype('float')]:
data = numpy.where(numpy.isnan(data), nodata_value, data)
ds.GetRasterBand(1).WriteArray(data)
ds = None | python | def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,
gdal_type=GDT_Float32):
"""Output Raster to GeoTiff format file.
Args:
f_name: output gtiff file name.
n_rows: Row count.
n_cols: Col count.
data: 2D array data.
geotransform: geographic transformation.
srs: coordinate system.
nodata_value: nodata value.
gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
GDT_Float32 as default.
"""
UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
driver = gdal_GetDriverByName(str('GTiff'))
try:
ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
except Exception:
print('Cannot create output file %s' % f_name)
return
ds.SetGeoTransform(geotransform)
try:
ds.SetProjection(srs.ExportToWkt())
except (AttributeError, Exception):
ds.SetProjection(srs)
ds.GetRasterBand(1).SetNoDataValue(nodata_value)
# if data contains numpy.nan, then replaced by nodata_value
if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
numpy.dtype('float')]:
data = numpy.where(numpy.isnan(data), nodata_value, data)
ds.GetRasterBand(1).WriteArray(data)
ds = None | [
"def",
"write_gtiff_file",
"(",
"f_name",
",",
"n_rows",
",",
"n_cols",
",",
"data",
",",
"geotransform",
",",
"srs",
",",
"nodata_value",
",",
"gdal_type",
"=",
"GDT_Float32",
")",
":",
"UtilClass",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"FileClass",
".",
"get_file_fullpath",
"(",
"f_name",
")",
")",
")",
"driver",
"=",
"gdal_GetDriverByName",
"(",
"str",
"(",
"'GTiff'",
")",
")",
"try",
":",
"ds",
"=",
"driver",
".",
"Create",
"(",
"f_name",
",",
"n_cols",
",",
"n_rows",
",",
"1",
",",
"gdal_type",
")",
"except",
"Exception",
":",
"print",
"(",
"'Cannot create output file %s'",
"%",
"f_name",
")",
"return",
"ds",
".",
"SetGeoTransform",
"(",
"geotransform",
")",
"try",
":",
"ds",
".",
"SetProjection",
"(",
"srs",
".",
"ExportToWkt",
"(",
")",
")",
"except",
"AttributeError",
"or",
"Exception",
":",
"ds",
".",
"SetProjection",
"(",
"srs",
")",
"ds",
".",
"GetRasterBand",
"(",
"1",
")",
".",
"SetNoDataValue",
"(",
"nodata_value",
")",
"# if data contains numpy.nan, then replaced by nodata_value",
"if",
"isinstance",
"(",
"data",
",",
"numpy",
".",
"ndarray",
")",
"and",
"data",
".",
"dtype",
"in",
"[",
"numpy",
".",
"dtype",
"(",
"'int'",
")",
",",
"numpy",
".",
"dtype",
"(",
"'float'",
")",
"]",
":",
"data",
"=",
"numpy",
".",
"where",
"(",
"numpy",
".",
"isnan",
"(",
"data",
")",
",",
"nodata_value",
",",
"data",
")",
"ds",
".",
"GetRasterBand",
"(",
"1",
")",
".",
"WriteArray",
"(",
"data",
")",
"ds",
"=",
"None"
] | Output Raster to GeoTiff format file.
Args:
f_name: output gtiff file name.
n_rows: Row count.
n_cols: Col count.
data: 2D array data.
geotransform: geographic transformation.
srs: coordinate system.
nodata_value: nodata value.
gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
GDT_Float32 as default. | [
"Output",
"Raster",
"to",
"GeoTiff",
"format",
"file",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L369-L402 | train |
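A minimal write, building the geotransform and projection by hand. The EPSG code is arbitrary for the sketch, and DEFAULT_NODATA is assumed importable from pygeoc.raster as the module-level constant used above.

import numpy
from osgeo.osr import SpatialReference
from pygeoc.raster import RasterUtilClass, DEFAULT_NODATA

data = numpy.arange(12, dtype=float).reshape(3, 4)
geotrans = [500000.0, 30.0, 0, 4100000.0, 0, -30.0]  # xmin, dx, 0, ymax, 0, -dy
srs = SpatialReference()
srs.ImportFromEPSG(32650)  # arbitrary UTM zone

RasterUtilClass.write_gtiff_file('demo.tif', 3, 4, data, geotrans, srs, DEFAULT_NODATA)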
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.write_asc_file | def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
"""Output Raster to ASCII file.
Args:
filename: output ASCII filename.
data: 2D array data.
xsize: Col count.
ysize: Row count.
geotransform: geographic transformation.
nodata_value: nodata value.
"""
UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
header = 'NCOLS %d\n' \
'NROWS %d\n' \
'XLLCENTER %f\n' \
'YLLCENTER %f\n' \
'CELLSIZE %f\n' \
'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],
geotransform[3] - (ysize - 0.5) * geotransform[1],
geotransform[1], nodata_value)
with open(filename, 'w', encoding='utf-8') as f:
f.write(header)
for i in range(0, ysize):
for j in range(0, xsize):
f.write('%s\t' % repr(data[i][j]))
f.write('\n')
f.close() | python | def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
"""Output Raster to ASCII file.
Args:
filename: output ASCII filename.
data: 2D array data.
xsize: Col count.
ysize: Row count.
geotransform: geographic transformation.
nodata_value: nodata value.
"""
UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
header = 'NCOLS %d\n' \
'NROWS %d\n' \
'XLLCENTER %f\n' \
'YLLCENTER %f\n' \
'CELLSIZE %f\n' \
'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],
geotransform[3] - (ysize - 0.5) * geotransform[1],
geotransform[1], nodata_value)
with open(filename, 'w', encoding='utf-8') as f:
f.write(header)
for i in range(0, ysize):
for j in range(0, xsize):
f.write('%s\t' % repr(data[i][j]))
f.write('\n')
f.close() | [
"def",
"write_asc_file",
"(",
"filename",
",",
"data",
",",
"xsize",
",",
"ysize",
",",
"geotransform",
",",
"nodata_value",
")",
":",
"UtilClass",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"FileClass",
".",
"get_file_fullpath",
"(",
"filename",
")",
")",
")",
"header",
"=",
"'NCOLS %d\\n'",
"'NROWS %d\\n'",
"'XLLCENTER %f\\n'",
"'YLLCENTER %f\\n'",
"'CELLSIZE %f\\n'",
"'NODATA_VALUE %f'",
"%",
"(",
"xsize",
",",
"ysize",
",",
"geotransform",
"[",
"0",
"]",
"+",
"0.5",
"*",
"geotransform",
"[",
"1",
"]",
",",
"geotransform",
"[",
"3",
"]",
"-",
"(",
"ysize",
"-",
"0.5",
")",
"*",
"geotransform",
"[",
"1",
"]",
",",
"geotransform",
"[",
"1",
"]",
",",
"nodata_value",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"ysize",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"xsize",
")",
":",
"f",
".",
"write",
"(",
"'%s\\t'",
"%",
"repr",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
")",
")",
"f",
".",
"write",
"(",
"'\\n'",
")",
"f",
".",
"close",
"(",
")"
] | Output Raster to ASCII file.
Args:
filename: output ASCII filename.
data: 2D array data.
xsize: Col count.
ysize: Row count.
geotransform: geographic transformation.
nodata_value: nodata value. | [
"Output",
"Raster",
"to",
"ASCII",
"file",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L405-L432 | train |
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_to_gtiff | def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):
"""Converting Raster format to GeoTIFF.
Args:
tif: source raster file path.
geotif: output raster file path.
change_nodata: change NoDataValue to -9999 or not.
change_gdal_type: If True, output the Float32 data type.
"""
rst_file = RasterUtilClass.read_raster(tif)
nodata = rst_file.noDataValue
if change_nodata:
if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):
nodata = DEFAULT_NODATA
rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA
gdal_type = rst_file.dataType
if change_gdal_type:
gdal_type = GDT_Float32
RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data,
rst_file.geotrans, rst_file.srs, nodata,
gdal_type) | python | def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):
"""Converting Raster format to GeoTIFF.
Args:
tif: source raster file path.
geotif: output raster file path.
change_nodata: change NoDataValue to -9999 or not.
change_gdal_type: If True, output the Float32 data type.
"""
rst_file = RasterUtilClass.read_raster(tif)
nodata = rst_file.noDataValue
if change_nodata:
if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):
nodata = DEFAULT_NODATA
rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA
gdal_type = rst_file.dataType
if change_gdal_type:
gdal_type = GDT_Float32
RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data,
rst_file.geotrans, rst_file.srs, nodata,
gdal_type) | [
"def",
"raster_to_gtiff",
"(",
"tif",
",",
"geotif",
",",
"change_nodata",
"=",
"False",
",",
"change_gdal_type",
"=",
"False",
")",
":",
"rst_file",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"tif",
")",
"nodata",
"=",
"rst_file",
".",
"noDataValue",
"if",
"change_nodata",
":",
"if",
"not",
"MathClass",
".",
"floatequal",
"(",
"rst_file",
".",
"noDataValue",
",",
"DEFAULT_NODATA",
")",
":",
"nodata",
"=",
"DEFAULT_NODATA",
"rst_file",
".",
"data",
"[",
"rst_file",
".",
"data",
"==",
"rst_file",
".",
"noDataValue",
"]",
"=",
"DEFAULT_NODATA",
"gdal_type",
"=",
"rst_file",
".",
"dataType",
"if",
"change_gdal_type",
":",
"gdal_type",
"=",
"GDT_Float32",
"RasterUtilClass",
".",
"write_gtiff_file",
"(",
"geotif",
",",
"rst_file",
".",
"nRows",
",",
"rst_file",
".",
"nCols",
",",
"rst_file",
".",
"data",
",",
"rst_file",
".",
"geotrans",
",",
"rst_file",
".",
"srs",
",",
"nodata",
",",
"gdal_type",
")"
] | Converting Raster format to GeoTIFF.
Args:
tif: source raster file path.
geotif: output raster file path.
change_nodata: change NoDataValue to -9999 or not.
change_gdal_type: If True, output the Float32 data type. | [
"Converting",
"Raster",
"format",
"to",
"GeoTIFF",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L435-L456 | train |
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_to_asc | def raster_to_asc(raster_f, asc_f):
"""Converting Raster format to ASCII raster.
Args:
raster_f: raster file.
asc_f: output ASCII file.
"""
raster_r = RasterUtilClass.read_raster(raster_f)
RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows,
raster_r.geotrans, raster_r.noDataValue) | python | def raster_to_asc(raster_f, asc_f):
"""Converting Raster format to ASCII raster.
Args:
raster_f: raster file.
asc_f: output ASCII file.
"""
raster_r = RasterUtilClass.read_raster(raster_f)
RasterUtilClass.write_asc_file(asc_f, raster_r.data, raster_r.nCols, raster_r.nRows,
raster_r.geotrans, raster_r.noDataValue) | [
"def",
"raster_to_asc",
"(",
"raster_f",
",",
"asc_f",
")",
":",
"raster_r",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"raster_f",
")",
"RasterUtilClass",
".",
"write_asc_file",
"(",
"asc_f",
",",
"raster_r",
".",
"data",
",",
"raster_r",
".",
"nCols",
",",
"raster_r",
".",
"nRows",
",",
"raster_r",
".",
"geotrans",
",",
"raster_r",
".",
"noDataValue",
")"
] | Converting Raster format to ASCII raster.
Args:
raster_f: raster file.
asc_f: output ASCII file. | [
"Converting",
"Raster",
"format",
"to",
"ASCII",
"raster",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L459-L468 | train |
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_statistics | def raster_statistics(raster_file):
"""Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
minv, maxv, meanv, std = band.ComputeStatistics(False)
return minv, maxv, meanv, std | python | def raster_statistics(raster_file):
"""Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std.
"""
ds = gdal_Open(raster_file)
band = ds.GetRasterBand(1)
minv, maxv, meanv, std = band.ComputeStatistics(False)
return minv, maxv, meanv, std | [
"def",
"raster_statistics",
"(",
"raster_file",
")",
":",
"ds",
"=",
"gdal_Open",
"(",
"raster_file",
")",
"band",
"=",
"ds",
".",
"GetRasterBand",
"(",
"1",
")",
"minv",
",",
"maxv",
",",
"meanv",
",",
"std",
"=",
"band",
".",
"ComputeStatistics",
"(",
"False",
")",
"return",
"minv",
",",
"maxv",
",",
"meanv",
",",
"std"
] | Get basic statistics of raster data.
Args:
raster_file: raster file path.
Returns:
min, max, mean, std. | [
"Get",
"basic",
"statistics",
"of",
"raster",
"data",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L471-L483 | train |
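Quick statistics call on the same placeholder raster:

minv, maxv, meanv, std = RasterUtilClass.raster_statistics('dem.tif')  # placeholder path
print('min=%s max=%s mean=%s std=%s' % (minv, maxv, meanv, std))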
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.split_raster | def split_raster(rs, split_shp, field_name, temp_dir):
"""Split raster by given shapefile and field name.
Args:
rs: original raster files (list).
split_shp: boundary (ESRI Shapefile) used to split the raster.
field_name: field name identifying the split value.
temp_dir: directory to store the split rasters.
"""
UtilClass.rmmkdir(temp_dir)
ds = ogr_Open(split_shp)
lyr = ds.GetLayer(0)
lyr.ResetReading()
ft = lyr.GetNextFeature()
while ft:
cur_field_name = ft.GetFieldAsString(field_name)
for r in rs:
cur_file_name = r.split(os.sep)[-1]
outraster = temp_dir + os.sep + \
cur_file_name.replace('.tif', '_%s.tif' %
cur_field_name.replace(' ', '_'))
subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,
'-crop_to_cutline', '-cwhere',
"'%s'='%s'" % (field_name, cur_field_name), '-dstnodata',
'-9999'])
ft = lyr.GetNextFeature()
ds = None | python | def split_raster(rs, split_shp, field_name, temp_dir):
"""Split raster by given shapefile and field name.
Args:
rs: list of origin raster files.
split_shp: boundary (ESRI Shapefile) used to split the raster.
field_name: field name identifying the split value.
temp_dir: directory to store the split rasters.
"""
UtilClass.rmmkdir(temp_dir)
ds = ogr_Open(split_shp)
lyr = ds.GetLayer(0)
lyr.ResetReading()
ft = lyr.GetNextFeature()
while ft:
cur_field_name = ft.GetFieldAsString(field_name)
for r in rs:
cur_file_name = r.split(os.sep)[-1]
outraster = temp_dir + os.sep + \
cur_file_name.replace('.tif', '_%s.tif' %
cur_field_name.replace(' ', '_'))
subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,
'-crop_to_cutline', '-cwhere',
"'%s'='%s'" % (field_name, cur_field_name), '-dstnodata',
'-9999'])
ft = lyr.GetNextFeature()
ds = None | [
"def",
"split_raster",
"(",
"rs",
",",
"split_shp",
",",
"field_name",
",",
"temp_dir",
")",
":",
"UtilClass",
".",
"rmmkdir",
"(",
"temp_dir",
")",
"ds",
"=",
"ogr_Open",
"(",
"split_shp",
")",
"lyr",
"=",
"ds",
".",
"GetLayer",
"(",
"0",
")",
"lyr",
".",
"ResetReading",
"(",
")",
"ft",
"=",
"lyr",
".",
"GetNextFeature",
"(",
")",
"while",
"ft",
":",
"cur_field_name",
"=",
"ft",
".",
"GetFieldAsString",
"(",
"field_name",
")",
"for",
"r",
"in",
"rs",
":",
"cur_file_name",
"=",
"r",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"1",
"]",
"outraster",
"=",
"temp_dir",
"+",
"os",
".",
"sep",
"+",
"cur_file_name",
".",
"replace",
"(",
"'.tif'",
",",
"'_%s.tif'",
"%",
"cur_field_name",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
"subprocess",
".",
"call",
"(",
"[",
"'gdalwarp'",
",",
"r",
",",
"outraster",
",",
"'-cutline'",
",",
"split_shp",
",",
"'-crop_to_cutline'",
",",
"'-cwhere'",
",",
"\"'%s'='%s'\"",
"%",
"(",
"field_name",
",",
"cur_field_name",
")",
",",
"'-dstnodata'",
",",
"'-9999'",
"]",
")",
"ft",
"=",
"lyr",
".",
"GetNextFeature",
"(",
")",
"ds",
"=",
"None"
] | Split raster by given shapefile and field name.
Args:
rs: list of origin raster files.
split_shp: boundary (ESRI Shapefile) used to split the raster.
field_name: field name identifying the split value.
temp_dir: directory to store the split rasters. | [
"Split",
"raster",
"by",
"given",
"shapefile",
"and",
"field",
"name",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L486-L512 | train |
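Because the function shells out to gdalwarp through subprocess.call, the GDAL command-line tools must be on the PATH. A hedged usage sketch; every name below (rasters, shapefile, field, directory) is a placeholder:

from pygeoc.raster import RasterUtilClass

# Clip dem.tif and landuse.tif once per distinct value of the 'SUBBASIN'
# field; note temp_dir is wiped and recreated by UtilClass.rmmkdir first.
RasterUtilClass.split_raster(['dem.tif', 'landuse.tif'],
                             'subbasin.shp', 'SUBBASIN', 'split_out')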
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.get_negative_dem | def get_negative_dem(raw_dem, neg_dem):
"""Get negative DEM data."""
origin = RasterUtilClass.read_raster(raw_dem)
max_v = numpy.max(origin.data)
temp = origin.data < 0
neg = numpy.where(temp, origin.noDataValue, max_v - origin.data)
RasterUtilClass.write_gtiff_file(neg_dem, origin.nRows, origin.nCols, neg, origin.geotrans,
origin.srs, origin.noDataValue, origin.dataType) | python | def get_negative_dem(raw_dem, neg_dem):
"""Get negative DEM data."""
origin = RasterUtilClass.read_raster(raw_dem)
max_v = numpy.max(origin.data)
temp = origin.data < 0
neg = numpy.where(temp, origin.noDataValue, max_v - origin.data)
RasterUtilClass.write_gtiff_file(neg_dem, origin.nRows, origin.nCols, neg, origin.geotrans,
origin.srs, origin.noDataValue, origin.dataType) | [
"def",
"get_negative_dem",
"(",
"raw_dem",
",",
"neg_dem",
")",
":",
"origin",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"raw_dem",
")",
"max_v",
"=",
"numpy",
".",
"max",
"(",
"origin",
".",
"data",
")",
"temp",
"=",
"origin",
".",
"data",
"<",
"0",
"neg",
"=",
"numpy",
".",
"where",
"(",
"temp",
",",
"origin",
".",
"noDataValue",
",",
"max_v",
"-",
"origin",
".",
"data",
")",
"RasterUtilClass",
".",
"write_gtiff_file",
"(",
"neg_dem",
",",
"origin",
".",
"nRows",
",",
"origin",
".",
"nCols",
",",
"neg",
",",
"origin",
".",
"geotrans",
",",
"origin",
".",
"srs",
",",
"origin",
".",
"noDataValue",
",",
"origin",
".",
"dataType",
")"
] | Get negative DEM data. | [
"Get",
"negative",
"DEM",
"data",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L515-L522 | train |
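The inversion itself is a single numpy.where call; a self-contained sketch of the same logic on made-up values, so the effect is easy to see:

import numpy

dem = numpy.array([[10., 20.], [30., -9999.]])
nodata = -9999.
# Negative cells are treated as nodata; valid cells become max - value,
# turning depressions into peaks.
neg = numpy.where(dem < 0, nodata, dem.max() - dem)
# neg -> [[20., 10.], [0., -9999.]]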
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_binarization | def raster_binarization(given_value, rasterfilename):
"""Make the raster into binarization.
The opening and closing are based on binary image. Therefore we need to
make the raster into binarization.
Args:
given_value: The given value's pixels will be value in 1,
other pixels will be value in 0.
rasterfilename: The initial rasterfilena,e.
Returns:
binary_raster: Raster after binarization.
"""
origin_raster = RasterUtilClass.read_raster(rasterfilename)
binary_raster = numpy.where(origin_raster.data == given_value, 1, 0)
return binary_raster | python | def raster_binarization(given_value, rasterfilename):
"""Make the raster into binarization.
The opening and closing are based on binary image. Therefore we need to
make the raster into binarization.
Args:
given_value: The given value's pixels will be value in 1,
other pixels will be value in 0.
rasterfilename: The initial rasterfilena,e.
Returns:
binary_raster: Raster after binarization.
"""
origin_raster = RasterUtilClass.read_raster(rasterfilename)
binary_raster = numpy.where(origin_raster.data == given_value, 1, 0)
return binary_raster | [
"def",
"raster_binarization",
"(",
"given_value",
",",
"rasterfilename",
")",
":",
"origin_raster",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"rasterfilename",
")",
"binary_raster",
"=",
"numpy",
".",
"where",
"(",
"origin_raster",
".",
"data",
"==",
"given_value",
",",
"1",
",",
"0",
")",
"return",
"binary_raster"
] | Binarize the raster.
The opening and closing operations work on binary images, so the raster
must be binarized first.
Args:
given_value: pixels equal to this value are set to 1; all other
pixels are set to 0.
rasterfilename: the input raster filename.
Returns:
binary_raster: raster after binarization. | [
"Make",
"the",
"raster",
"into",
"binarization",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L567-L583 | train |
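A usage sketch; the landuse code and path are placeholders:

import numpy
from pygeoc.raster import RasterUtilClass

# Pixels equal to 6 become 1, everything else (including nodata) becomes 0.
binary = RasterUtilClass.raster_binarization(6, 'landuse.tif')
print(numpy.unique(binary))  # [0 1]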
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_erosion | def raster_erosion(rasterfile):
"""Erode the raster image.
Find the minimum pixel value in the 8-neighborhood, then set the
center pixel to that minimum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray.
"""
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile)).data
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return "Your rasterfile has a wrong type. Type must be string or " \
"numpy.array or class Raster in pygeoc."
max_value_raster = origin_raster.max()
erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
# In order to compute the raster edges, we need to expand the original
# raster's rows and cols. We need to add the edges whose pixels' value is
# the max pixel's value in raster.
add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full((origin_raster.shape[0] + 2, 1), max_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
# Erode the raster.
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
min_pixel_value = max_value_raster
# Find the min pixel value in the 8-neighborhood.
for k in range(3):
for l in range(3):
if expand_origin_raster[i + k, j + l] <= min_pixel_value:
min_pixel_value = expand_origin_raster[i + k, j + l]
# After this loop, we get the min pixel's value of the
# 8-neighborhood. Then we change the compute pixel's value into
# the min pixel's value.
erosion_raster[i, j] = min_pixel_value
# Return the result.
return erosion_raster | python | def raster_erosion(rasterfile):
"""Erode the raster image.
Find the minimum pixel value in the 8-neighborhood, then set the
center pixel to that minimum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray.
"""
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile)).data
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return "Your rasterfile has a wrong type. Type must be string or " \
"numpy.array or class Raster in pygeoc."
max_value_raster = origin_raster.max()
erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
# In order to compute the raster edges, we need to expand the original
# raster's rows and cols. We need to add the edges whose pixels' value is
# the max pixel's value in raster.
add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full((origin_raster.shape[0] + 2, 1), max_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
# Erode the raster.
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
min_pixel_value = max_value_raster
# Find the min pixel value in the 8-neighborhood.
for k in range(3):
for l in range(3):
if expand_origin_raster[i + k, j + l] <= min_pixel_value:
min_pixel_value = expand_origin_raster[i + k, j + l]
# After this loop, we get the min pixel's value of the
# 8-neighborhood. Then we change the compute pixel's value into
# the min pixel's value.
erosion_raster[i, j] = min_pixel_value
# Return the result.
return erosion_raster | [
"def",
"raster_erosion",
"(",
"rasterfile",
")",
":",
"if",
"is_string",
"(",
"rasterfile",
")",
":",
"origin_raster",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"str",
"(",
"rasterfile",
")",
")",
"elif",
"isinstance",
"(",
"rasterfile",
",",
"Raster",
")",
":",
"origin_raster",
"=",
"rasterfile",
".",
"data",
"elif",
"isinstance",
"(",
"rasterfile",
",",
"numpy",
".",
"ndarray",
")",
":",
"origin_raster",
"=",
"rasterfile",
"else",
":",
"return",
"\"Your rasterfile has a wrong type. Type must be string or \"",
"\"numpy.array or class Raster in pygeoc.\"",
"max_value_raster",
"=",
"origin_raster",
".",
"max",
"(",
")",
"erosion_raster",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
",",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
")",
"# In order to compute the raster edges, we need to expand the original",
"# raster's rows and cols. We need to add the edges whose pixels' value is",
"# the max pixel's value in raster.",
"add_row",
"=",
"numpy",
".",
"full",
"(",
"(",
"1",
",",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
",",
"max_value_raster",
")",
"temp_origin_raster",
"=",
"numpy",
".",
"vstack",
"(",
"(",
"numpy",
".",
"vstack",
"(",
"(",
"add_row",
",",
"origin_raster",
")",
")",
",",
"add_row",
")",
")",
"add_col",
"=",
"numpy",
".",
"full",
"(",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
"+",
"2",
",",
"1",
")",
",",
"max_value_raster",
")",
"expand_origin_raster",
"=",
"numpy",
".",
"hstack",
"(",
"(",
"numpy",
".",
"hstack",
"(",
"(",
"add_col",
",",
"temp_origin_raster",
")",
")",
",",
"add_col",
")",
")",
"# Erode the raster.",
"for",
"i",
"in",
"range",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
":",
"min_pixel_value",
"=",
"max_value_raster",
"# Find the min pixel value in the 8-neighborhood.",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"for",
"l",
"in",
"range",
"(",
"3",
")",
":",
"if",
"expand_origin_raster",
"[",
"i",
"+",
"k",
",",
"j",
"+",
"l",
"]",
"<=",
"min_pixel_value",
":",
"min_pixel_value",
"=",
"expand_origin_raster",
"[",
"i",
"+",
"k",
",",
"j",
"+",
"l",
"]",
"# After this loop, we get the min pixel's value of the",
"# 8-neighborhood. Then we change the compute pixel's value into",
"# the min pixel's value.",
"erosion_raster",
"[",
"i",
",",
"j",
"]",
"=",
"min_pixel_value",
"# Return the result.",
"return",
"erosion_raster"
] | Erode the raster image.
Find the minimum pixel value in the 8-neighborhood, then set the
center pixel to that minimum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray. | [
"Erode",
"the",
"raster",
"image",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L586-L631 | train |
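The nested Python loops cost O(rows x cols x 9); when SciPy is available, the same max-padded 3x3 minimum filter can be written in two vectorized lines. This is only an equivalent sketch, not part of PyGeoC:

import numpy
from scipy import ndimage

arr = numpy.random.randint(0, 9, (5, 5)).astype(float)
# Pad with the array maximum so border pixels see the same neighborhood
# the loop-based version builds, then take the 3x3 minimum and crop.
padded = numpy.pad(arr, 1, mode='constant', constant_values=arr.max())
eroded = ndimage.grey_erosion(padded, size=(3, 3))[1:-1, 1:-1]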
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.raster_dilation | def raster_dilation(rasterfile):
"""Dilate the raster image.
Find the maximum pixel value in the 8-neighborhood, then set the
center pixel to that maximum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
dilation_raster: raster image after dilation, type is numpy.ndarray.
"""
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile)).data
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return 'Your rasterfile has a wrong type. Type must be string or ' \
'numpy.array or class Raster in pygeoc.'
min_value_raster = origin_raster.min()
dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
# In order to compute the raster edges, we need to expand the original
# raster's rows and cols. We need to add the edges whose pixels' value is
# the min pixel's value in raster.
add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full((origin_raster.shape[0] + 2, 1), min_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
# Dilate the raster.
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
max_pixel_value = min_value_raster
# Find the max pixel value in the 8-neighborhood.
for k in range(3):
for l in range(3):
if expand_origin_raster[i + k, j + l] >= max_pixel_value:
max_pixel_value = expand_origin_raster[i + k, j + l]
# After this loop, we get the max pixel's value of the
# 8-neighborhood. Then we change the compute pixel's value into
# the max pixel's value.
dilation_raster[i, j] = max_pixel_value
# Return the result.
return dilation_raster | python | def raster_dilation(rasterfile):
"""Dilate the raster image.
Find the maximum pixel value in the 8-neighborhood, then set the
center pixel to that maximum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
dilation_raster: raster image after dilation, type is numpy.ndarray.
"""
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile)).data
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return 'Your rasterfile has a wrong type. Type must be string or ' \
'numpy.array or class Raster in pygeoc.'
min_value_raster = origin_raster.min()
dilation_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
# In order to compute the raster edges, we need to expand the original
# raster's rows and cols. We need to add the edges whose pixels' value is
# the min pixel's value in raster.
add_row = numpy.full((1, origin_raster.shape[1]), min_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full((origin_raster.shape[0] + 2, 1), min_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
# Dilate the raster.
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
max_pixel_value = min_value_raster
# Find the max pixel value in the 8-neighborhood.
for k in range(3):
for l in range(3):
if expand_origin_raster[i + k, j + l] >= max_pixel_value:
max_pixel_value = expand_origin_raster[i + k, j + l]
# After this loop, we get the max pixel's value of the
# 8-neighborhood. Then we change the compute pixel's value into
# the max pixel's value.
dilation_raster[i, j] = max_pixel_value
# Return the result.
return dilation_raster | [
"def",
"raster_dilation",
"(",
"rasterfile",
")",
":",
"if",
"is_string",
"(",
"rasterfile",
")",
":",
"origin_raster",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"str",
"(",
"rasterfile",
")",
")",
"elif",
"isinstance",
"(",
"rasterfile",
",",
"Raster",
")",
":",
"origin_raster",
"=",
"rasterfile",
".",
"data",
"elif",
"isinstance",
"(",
"rasterfile",
",",
"numpy",
".",
"ndarray",
")",
":",
"origin_raster",
"=",
"rasterfile",
"else",
":",
"return",
"'Your rasterfile has a wrong type. Type must be string or '",
"'numpy.array or class Raster in pygeoc.'",
"min_value_raster",
"=",
"origin_raster",
".",
"min",
"(",
")",
"dilation_raster",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
",",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
")",
"# In order to compute the raster edges, we need to expand the original",
"# raster's rows and cols. We need to add the edges whose pixels' value is",
"# the max pixel's value in raster.",
"add_row",
"=",
"numpy",
".",
"full",
"(",
"(",
"1",
",",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
",",
"min_value_raster",
")",
"temp_origin_raster",
"=",
"numpy",
".",
"vstack",
"(",
"(",
"numpy",
".",
"vstack",
"(",
"(",
"add_row",
",",
"origin_raster",
")",
")",
",",
"add_row",
")",
")",
"add_col",
"=",
"numpy",
".",
"full",
"(",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
"+",
"2",
",",
"1",
")",
",",
"min_value_raster",
")",
"expand_origin_raster",
"=",
"numpy",
".",
"hstack",
"(",
"(",
"numpy",
".",
"hstack",
"(",
"(",
"add_col",
",",
"temp_origin_raster",
")",
")",
",",
"add_col",
")",
")",
"# Dilate the raster.",
"for",
"i",
"in",
"range",
"(",
"origin_raster",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"origin_raster",
".",
"shape",
"[",
"1",
"]",
")",
":",
"max_pixel_value",
"=",
"min_value_raster",
"# Find the max pixel value in the 8-neighborhood.",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"for",
"l",
"in",
"range",
"(",
"3",
")",
":",
"if",
"expand_origin_raster",
"[",
"i",
"+",
"k",
",",
"j",
"+",
"l",
"]",
">=",
"max_pixel_value",
":",
"max_pixel_value",
"=",
"expand_origin_raster",
"[",
"i",
"+",
"k",
",",
"j",
"+",
"l",
"]",
"# After this loop, we get the max pixel's value of the",
"# 8-neighborhood. Then we change the compute pixel's value into",
"# the max pixel's value.",
"dilation_raster",
"[",
"i",
",",
"j",
"]",
"=",
"max_pixel_value",
"# Return the result.",
"return",
"dilation_raster"
] | Dilate the raster image.
Find the maximum pixel value in the 8-neighborhood, then set the
center pixel to that maximum.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
dilation_raster: raster image after dilation, type is numpy.ndarray. | [
"Dilate",
"the",
"raster",
"image",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L634-L679 | train |
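The add_row/add_col plus vstack/hstack construction used by both morphology functions can also be expressed with a single numpy.pad call; a sketch of the dilation padding (erosion would pad with the maximum instead):

import numpy

arr = numpy.arange(9, dtype=float).reshape(3, 3)
# One-pixel border filled with the array minimum, equivalent to the
# explicit row/column stacking above.
expanded = numpy.pad(arr, 1, mode='constant', constant_values=arr.min())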
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.openning | def openning(input_rasterfilename, times):
"""Do openning.
Openning: erode first, then dilate.
Args:
input_rasterfilename: input original raster image filename.
times: number of erosion and dilation passes.
Returns:
openning_raster: raster image after the opening operation.
"""
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
openning_raster = input_raster
for i in range(times):
openning_raster = RasterUtilClass.raster_erosion(openning_raster)
for i in range(times):
openning_raster = RasterUtilClass.raster_dilation(openning_raster)
return openning_raster | python | def openning(input_rasterfilename, times):
"""Do openning.
Openning: erode first, then dilate.
Args:
input_rasterfilename: input original raster image filename.
times: number of erosion and dilation passes.
Returns:
openning_raster: raster image after the opening operation.
"""
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
openning_raster = input_raster
for i in range(times):
openning_raster = RasterUtilClass.raster_erosion(openning_raster)
for i in range(times):
openning_raster = RasterUtilClass.raster_dilation(openning_raster)
return openning_raster | [
"def",
"openning",
"(",
"input_rasterfilename",
",",
"times",
")",
":",
"input_raster",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"input_rasterfilename",
")",
"openning_raster",
"=",
"input_raster",
"for",
"i",
"in",
"range",
"(",
"times",
")",
":",
"openning_raster",
"=",
"RasterUtilClass",
".",
"raster_erosion",
"(",
"openning_raster",
")",
"for",
"i",
"in",
"range",
"(",
"times",
")",
":",
"openning_raster",
"=",
"RasterUtilClass",
".",
"raster_dilation",
"(",
"openning_raster",
")",
"return",
"openning_raster"
] | Do openning.
Openning: erode first, then dilate.
Args:
input_rasterfilename: input original raster image filename.
times: number of erosion and dilation passes.
Returns:
openning_raster: raster image after the opening operation. | [
"Do",
"openning",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L682-L700 | train |
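A usage sketch; 'binary.tif' stands in for a raster that was already binarized with raster_binarization:

from pygeoc.raster import RasterUtilClass

# One erosion pass then one dilation pass removes isolated speckles
# while roughly preserving the size of larger regions.
opened = RasterUtilClass.openning('binary.tif', 1)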
lreis2415/PyGeoC | pygeoc/raster.py | RasterUtilClass.closing | def closing(input_rasterfilename, times):
"""Do closing.
Closing: dilate first, then erode.
Args:
input_rasterfilename: input original raster image filename.
times: number of dilation and erosion passes.
Returns:
closing_raster: raster image after the closing operation.
"""
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
closing_raster = input_raster
for i in range(times):
closing_raster = RasterUtilClass.raster_dilation(closing_raster)
for i in range(times):
closing_raster = RasterUtilClass.raster_erosion(closing_raster)
return closing_raster | python | def closing(input_rasterfilename, times):
"""Do closing.
Closing: dilate first, then erode.
Args:
input_rasterfilename: input original raster image filename.
times: number of dilation and erosion passes.
Returns:
closing_raster: raster image after the closing operation.
"""
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
closing_raster = input_raster
for i in range(times):
closing_raster = RasterUtilClass.raster_dilation(closing_raster)
for i in range(times):
closing_raster = RasterUtilClass.raster_erosion(closing_raster)
return closing_raster | [
"def",
"closing",
"(",
"input_rasterfilename",
",",
"times",
")",
":",
"input_raster",
"=",
"RasterUtilClass",
".",
"read_raster",
"(",
"input_rasterfilename",
")",
"closing_raster",
"=",
"input_raster",
"for",
"i",
"in",
"range",
"(",
"times",
")",
":",
"closing_raster",
"=",
"RasterUtilClass",
".",
"raster_dilation",
"(",
"closing_raster",
")",
"for",
"i",
"in",
"range",
"(",
"times",
")",
":",
"closing_raster",
"=",
"RasterUtilClass",
".",
"raster_erosion",
"(",
"closing_raster",
")",
"return",
"closing_raster"
] | Do closing.
Closing: dilate first, then erode.
Args:
input_rasterfilename: input original raster image filename.
times: number of dilation and erosion passes.
Returns:
closing_raster: raster image after the closing operation. | [
"Do",
"closing",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L703-L721 | train |
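A sketch that fills small holes and persists the result, reusing the write_gtiff_file signature seen in get_negative_dem above; the paths are placeholders:

from pygeoc.raster import RasterUtilClass

src = RasterUtilClass.read_raster('binary.tif')
closed = RasterUtilClass.closing('binary.tif', 1)
RasterUtilClass.write_gtiff_file('closed.tif', src.nRows, src.nCols, closed,
                                 src.geotrans, src.srs, src.noDataValue,
                                 src.dataType)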
PeerAssets/pypeerassets | pypeerassets/transactions.py | calculate_tx_fee | def calculate_tx_fee(tx_size: int) -> Decimal:
'''return tx fee from tx size in bytes'''
per_kb_cost = 0.01
min_fee = Decimal(0.001)
fee = Decimal((tx_size / 1000) * per_kb_cost)
if fee <= min_fee:
return min_fee
else:
return fee | python | def calculate_tx_fee(tx_size: int) -> Decimal:
'''return tx fee from tx size in bytes'''
per_kb_cost = 0.01
min_fee = Decimal(0.001)
fee = Decimal((tx_size / 1000) * per_kb_cost)
if fee <= min_fee:
return min_fee
else:
return fee | [
"def",
"calculate_tx_fee",
"(",
"tx_size",
":",
"int",
")",
"->",
"Decimal",
":",
"per_kb_cost",
"=",
"0.01",
"min_fee",
"=",
"Decimal",
"(",
"0.001",
")",
"fee",
"=",
"Decimal",
"(",
"(",
"tx_size",
"/",
"1000",
")",
"*",
"per_kb_cost",
")",
"if",
"fee",
"<=",
"min_fee",
":",
"return",
"min_fee",
"else",
":",
"return",
"fee"
] | return tx fee from tx size in bytes | [
"return",
"tx",
"fee",
"from",
"tx",
"size",
"in",
"bytes"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L214-L225 | train |
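A quick check of the arithmetic: at 0.01 coin per kB, a 2000-byte transaction costs 0.02, while a 50-byte one falls back to the 0.001 minimum:

from pypeerassets.transactions import calculate_tx_fee

print(calculate_tx_fee(2000))  # ~Decimal('0.02')
print(calculate_tx_fee(50))    # minimum fee (0.001) applies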
PeerAssets/pypeerassets | pypeerassets/transactions.py | p2sh_p2pkh_script | def p2sh_p2pkh_script(network: str, address: str) -> P2shScript:
'''p2sh embedding p2pkh'''
network_params = net_query(network)
addr = Address.from_string(network=network_params,
string=address)
p2pkh = P2pkhScript(addr)
return P2shScript(p2pkh) | python | def p2sh_p2pkh_script(network: str, address: str) -> P2shScript:
'''p2sh embedding p2pkh'''
network_params = net_query(network)
addr = Address.from_string(network=network_params,
string=address)
p2pkh = P2pkhScript(addr)
return P2shScript(p2pkh) | [
"def",
"p2sh_p2pkh_script",
"(",
"network",
":",
"str",
",",
"address",
":",
"str",
")",
"->",
"P2shScript",
":",
"network_params",
"=",
"net_query",
"(",
"network",
")",
"addr",
"=",
"Address",
".",
"from_string",
"(",
"network",
"=",
"network_params",
",",
"string",
"=",
"address",
")",
"p2pkh",
"=",
"P2pkhScript",
"(",
"addr",
")",
"return",
"P2shScript",
"(",
"p2pkh",
")"
] | p2sh embedding p2pkh | [
"p2sh",
"embedding",
"p2pkh"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L246-L256 | train |
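A usage sketch; the address string is a placeholder and must be a valid address for the chosen network, otherwise Address.from_string will reject it:

from pypeerassets.transactions import p2sh_p2pkh_script

# Wrap the P2PKH script of a (placeholder) peercoin address in P2SH.
script = p2sh_p2pkh_script('peercoin', 'P_PLACEHOLDER_ADDRESS')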
PeerAssets/pypeerassets | pypeerassets/transactions.py | tx_output | def tx_output(network: str, value: Decimal, n: int,
script: ScriptSig) -> TxOut:
'''create TxOut object'''
network_params = net_query(network)
return TxOut(network=network_params,
value=int(value * network_params.to_unit),
n=n, script_pubkey=script) | python | def tx_output(network: str, value: Decimal, n: int,
script: ScriptSig) -> TxOut:
'''create TxOut object'''
network_params = net_query(network)
return TxOut(network=network_params,
value=int(value * network_params.to_unit),
n=n, script_pubkey=script) | [
"def",
"tx_output",
"(",
"network",
":",
"str",
",",
"value",
":",
"Decimal",
",",
"n",
":",
"int",
",",
"script",
":",
"ScriptSig",
")",
"->",
"TxOut",
":",
"network_params",
"=",
"net_query",
"(",
"network",
")",
"return",
"TxOut",
"(",
"network",
"=",
"network_params",
",",
"value",
"=",
"int",
"(",
"value",
"*",
"network_params",
".",
"to_unit",
")",
",",
"n",
"=",
"n",
",",
"script_pubkey",
"=",
"script",
")"
] | create TxOut object | [
"create",
"TxOut",
"object"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L259-L267 | train |
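A sketch building a single output; the value is given in coins and converted to base units via network_params.to_unit inside the helper. The address is again a placeholder:

from decimal import Decimal
from pypeerassets.transactions import p2sh_p2pkh_script, tx_output

script = p2sh_p2pkh_script('peercoin', 'P_PLACEHOLDER_ADDRESS')
out = tx_output('peercoin', Decimal('1.5'), 0, script)  # first output, n=0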
PeerAssets/pypeerassets | pypeerassets/transactions.py | make_raw_transaction | def make_raw_transaction(
network: str,
inputs: list,
outputs: list,
locktime: Locktime,
timestamp: int=int(time()),
version: int=1,
) -> MutableTransaction:
'''create raw transaction'''
network_params = net_query(network)
if network_params.name.startswith("peercoin"):
return MutableTransaction(
version=version,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params,
timestamp=timestamp,
)
return MutableTransaction(
version=version,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params,
) | python | def make_raw_transaction(
network: str,
inputs: list,
outputs: list,
locktime: Locktime,
timestamp: int=int(time()),
version: int=1,
) -> MutableTransaction:
'''create raw transaction'''
network_params = net_query(network)
if network_params.name.startswith("peercoin"):
return MutableTransaction(
version=version,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params,
timestamp=timestamp,
)
return MutableTransaction(
version=version,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params,
) | [
"def",
"make_raw_transaction",
"(",
"network",
":",
"str",
",",
"inputs",
":",
"list",
",",
"outputs",
":",
"list",
",",
"locktime",
":",
"Locktime",
",",
"timestamp",
":",
"int",
"=",
"int",
"(",
"time",
"(",
")",
")",
",",
"version",
":",
"int",
"=",
"1",
",",
")",
"->",
"MutableTransaction",
":",
"network_params",
"=",
"net_query",
"(",
"network",
")",
"if",
"network_params",
".",
"name",
".",
"startswith",
"(",
"\"peercoin\"",
")",
":",
"return",
"MutableTransaction",
"(",
"version",
"=",
"version",
",",
"ins",
"=",
"inputs",
",",
"outs",
"=",
"outputs",
",",
"locktime",
"=",
"locktime",
",",
"network",
"=",
"network_params",
",",
"timestamp",
"=",
"timestamp",
",",
")",
"return",
"MutableTransaction",
"(",
"version",
"=",
"version",
",",
"ins",
"=",
"inputs",
",",
"outs",
"=",
"outputs",
",",
"locktime",
"=",
"locktime",
",",
"network",
"=",
"network_params",
",",
")"
] | create raw transaction | [
"create",
"raw",
"transaction"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L270-L298 | train |
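The peercoin branch exists because peercoin transactions carry an extra timestamp field. A minimal sketch, assuming Locktime imports from btcpy.structs.transaction as elsewhere in this module; the empty input/output lists stand in for real TxIn objects and tx_output results:

from btcpy.structs.transaction import Locktime
from pypeerassets.transactions import make_raw_transaction

ins, outs = [], []  # placeholders for real TxIn / TxOut lists
unsigned = make_raw_transaction('peercoin', ins, outs, Locktime(0))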
PeerAssets/pypeerassets | pypeerassets/transactions.py | find_parent_outputs | def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut:
'''due to design of the btcpy library, TxIn object must be converted to TxOut object before signing'''
network_params = net_query(provider.network)
index = utxo.txout # utxo index
return TxOut.from_json(provider.getrawtransaction(utxo.txid,
1)['vout'][index],
network=network_params) | python | def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut:
'''due to design of the btcpy library, TxIn object must be converted to TxOut object before signing'''
network_params = net_query(provider.network)
index = utxo.txout # utxo index
return TxOut.from_json(provider.getrawtransaction(utxo.txid,
1)['vout'][index],
network=network_params) | [
"def",
"find_parent_outputs",
"(",
"provider",
":",
"Provider",
",",
"utxo",
":",
"TxIn",
")",
"->",
"TxOut",
":",
"network_params",
"=",
"net_query",
"(",
"provider",
".",
"network",
")",
"index",
"=",
"utxo",
".",
"txout",
"# utxo index",
"return",
"TxOut",
".",
"from_json",
"(",
"provider",
".",
"getrawtransaction",
"(",
"utxo",
".",
"txid",
",",
"1",
")",
"[",
"'vout'",
"]",
"[",
"index",
"]",
",",
"network",
"=",
"network_params",
")"
] | due to design of the btcpy library, TxIn object must be converted to TxOut object before signing | [
"due",
"to",
"design",
"of",
"the",
"btcpy",
"library",
"TxIn",
"object",
"must",
"be",
"converted",
"to",
"TxOut",
"object",
"before",
"signing"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L301-L308 | train |
PeerAssets/pypeerassets | pypeerassets/transactions.py | sign_transaction | def sign_transaction(provider: Provider, unsigned: MutableTransaction,
key: Kutil) -> Transaction:
'''sign transaction with Kutil'''
parent_outputs = [find_parent_outputs(provider, i) for i in unsigned.ins]
return key.sign_transaction(parent_outputs, unsigned) | python | def sign_transaction(provider: Provider, unsigned: MutableTransaction,
key: Kutil) -> Transaction:
'''sign transaction with Kutil'''
parent_outputs = [find_parent_outputs(provider, i) for i in unsigned.ins]
return key.sign_transaction(parent_outputs, unsigned) | [
"def",
"sign_transaction",
"(",
"provider",
":",
"Provider",
",",
"unsigned",
":",
"MutableTransaction",
",",
"key",
":",
"Kutil",
")",
"->",
"Transaction",
":",
"parent_outputs",
"=",
"[",
"find_parent_outputs",
"(",
"provider",
",",
"i",
")",
"for",
"i",
"in",
"unsigned",
".",
"ins",
"]",
"return",
"key",
".",
"sign_transaction",
"(",
"parent_outputs",
",",
"unsigned",
")"
] | sign transaction with Kutil | [
"sign",
"transaction",
"with",
"Kutil"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/transactions.py#L311-L316 | train |
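An end-to-end sketch tying the helpers together. Here provider (an RPC-style Provider), utxo_ins (a list of TxIn) and key (a Kutil instance) are assumed to exist already; sign_transaction resolves each input's parent TxOut through find_parent_outputs before signing:

from decimal import Decimal
from btcpy.structs.transaction import Locktime
from pypeerassets.transactions import (make_raw_transaction,
                                       p2sh_p2pkh_script, sign_transaction,
                                       tx_output)

script = p2sh_p2pkh_script('peercoin', 'P_PLACEHOLDER_ADDRESS')
outs = [tx_output('peercoin', Decimal('1.0'), 0, script)]
unsigned = make_raw_transaction('peercoin', utxo_ins, outs, Locktime(0))
signed = sign_transaction(provider, unsigned, key)  # ready to broadcast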
erwanp/publib | publib/main.py | set_style | def set_style(style='basic', **kwargs):
''' Changes Matplotlib basic style to produce high quality graphs. Call
this function at the beginning of your script. You can even further improve
graphs with a call to fix_style at the end of your script.
Parameters
----------
style: string
'basic', 'article', 'poster', 'B&W', 'talk', 'origin'
kwargs: dict of rcParams
add Matplotlib rcParams
Examples
--------
>>> set_style('article')
>>> set_style('poster',**{'lines.linewidth':2})
See Also
--------
:func:`~publib.publib.fix_style`,
:func:`~publib.tools.tools.reset_defaults`,
:func:`~publib.tools.tools.regenerate_fonts`
'''
style = _read_style(style)
# Add basic style as the first style
if style[0] != 'basic':
style = ['basic'] + style
# Apply all styles
for s in style:
_set_style(s, **kwargs) | python | def set_style(style='basic', **kwargs):
''' Changes Matplotlib basic style to produce high quality graphs. Call
this function at the beginning of your script. You can even further improve
graphs with a call to fix_style at the end of your script.
Parameters
----------
style: string
'basic', 'article', 'poster', 'B&W', 'talk', 'origin'
kwargs: dict of rcParams
add Matplotlib rcParams
Examples
--------
>>> set_style('article')
>>> set_style('poster',**{'lines.linewidth':2})
See Also
--------
:func:`~publib.publib.fix_style`,
:func:`~publib.tools.tools.reset_defaults`,
:func:`~publib.tools.tools.regenerate_fonts`
'''
style = _read_style(style)
# Add basic style as the first style
if style[0] != 'basic':
style = ['basic'] + style
# Apply all styles
for s in style:
_set_style(s, **kwargs) | [
"def",
"set_style",
"(",
"style",
"=",
"'basic'",
",",
"*",
"*",
"kwargs",
")",
":",
"style",
"=",
"_read_style",
"(",
"style",
")",
"# Add basic style as the first style",
"if",
"style",
"[",
"0",
"]",
"!=",
"'basic'",
":",
"style",
"=",
"[",
"'basic'",
"]",
"+",
"style",
"# Apply all styles",
"for",
"s",
"in",
"style",
":",
"_set_style",
"(",
"s",
",",
"*",
"*",
"kwargs",
")"
] | Changes Matplotlib basic style to produce high quality graphs. Call
this function at the beginning of your script. You can even further improve
graphs with a call to fix_style at the end of your script.
Parameters
----------
style: string
'basic', 'article', 'poster', 'B&W', 'talk', 'origin'
kwargs: dict of rcParams
add Matplotlib rcParams
Examples
--------
>>> set_style('article')
>>> set_style('poster',**{'lines.linewidth':2})
See Also
--------
:func:`~publib.publib.fix_style`,
:func:`~publib.tools.tools.reset_defaults`,
:func:`~publib.tools.tools.regenerate_fonts` | [
"Changes",
"Matplotlib",
"basic",
"style",
"to",
"produce",
"high",
"quality",
"graphs",
".",
"Call",
"this",
"function",
"at",
"the",
"beginning",
"of",
"your",
"script",
".",
"You",
"can",
"even",
"further",
"improve",
"graphs",
"with",
"a",
"call",
"to",
"fix_style",
"at",
"the",
"end",
"of",
"your",
"script",
"."
] | 0417e6a31d52e23b816ac74d40b4c11d4b8ba4a6 | https://github.com/erwanp/publib/blob/0417e6a31d52e23b816ac74d40b4c11d4b8ba4a6/publib/main.py#L89-L125 | train |
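A usage sketch following the docstring examples, assuming set_style is re-exported at the package top level (the examples call it bare):

import numpy as np
import matplotlib.pyplot as plt
from publib import set_style

set_style('article')  # 'basic' is applied first, then 'article'
a = np.linspace(0, 2 * np.pi, 100)
plt.plot(a, np.cos(a))
plt.show()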
erwanp/publib | publib/main.py | fix_style | def fix_style(style='basic', ax=None, **kwargs):
'''
Add an extra formatting layer to an axes that couldn't be changed directly
in matplotlib.rcParams or with styles. Apply this function to every axes
you create.
Parameters
----------
ax: a matplotlib axes.
If None, the last axes generated is used
style: string or list of string
['basic', 'article', 'poster', 'B&W','talk','origin']
one of the styles previously defined. It should match the style you
chose in set_style but nothing forces you to.
kwargs: dict
edit any of the style_params keys. ex:
>>> tight_layout=False
Examples
--------
plb.set_style('poster')
plt.plot(a,np.cos(a))
plb.fix_style('poster',**{'draggable_legend':False})
See Also
--------
:func:`~publib.publib.set_style`
:func:`~publib.tools.tools.reset_defaults`
'''
style = _read_style(style)
# Apply all styles
for s in style:
if not s in style_params.keys():
avail = [f.replace('.mplstyle', '') for f in os.listdir(
_get_lib()) if f.endswith('.mplstyle')]
raise ValueError('{0} is not a valid style. '.format(s) +
'Please pick a style from the list available in ' +
'{0}: {1}'.format(_get_lib(), avail))
_fix_style(style, ax, **kwargs) | python | def fix_style(style='basic', ax=None, **kwargs):
'''
Add an extra formatting layer to an axes that couldn't be changed directly
in matplotlib.rcParams or with styles. Apply this function to every axes
you create.
Parameters
----------
ax: a matplotlib axes.
If None, the last axes generated is used
style: string or list of string
['basic', 'article', 'poster', 'B&W','talk','origin']
one of the styles previously defined. It should match the style you
chose in set_style but nothing forces you to.
kwargs: dict
edit any of the style_params keys. ex:
>>> tight_layout=False
Examples
--------
plb.set_style('poster')
plt.plot(a,np.cos(a))
plb.fix_style('poster',**{'draggable_legend':False})
See Also
--------
:func:`~publib.publib.set_style`
:func:`~publib.tools.tools.reset_defaults`
'''
style = _read_style(style)
# Apply all styles
for s in style:
if not s in style_params.keys():
avail = [f.replace('.mplstyle', '') for f in os.listdir(
_get_lib()) if f.endswith('.mplstyle')]
raise ValueError('{0} is not a valid style. '.format(s) +
'Please pick a style from the list available in ' +
'{0}: {1}'.format(_get_lib(), avail))
_fix_style(style, ax, **kwargs) | [
"def",
"fix_style",
"(",
"style",
"=",
"'basic'",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"style",
"=",
"_read_style",
"(",
"style",
")",
"# Apply all styles",
"for",
"s",
"in",
"style",
":",
"if",
"not",
"s",
"in",
"style_params",
".",
"keys",
"(",
")",
":",
"avail",
"=",
"[",
"f",
".",
"replace",
"(",
"'.mplstyle'",
",",
"''",
")",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"_get_lib",
"(",
")",
")",
"if",
"f",
".",
"endswith",
"(",
"'.mplstyle'",
")",
"]",
"raise",
"ValueError",
"(",
"'{0} is not a valid style. '",
".",
"format",
"(",
"s",
")",
"+",
"'Please pick a style from the list available in '",
"+",
"'{0}: {1}'",
".",
"format",
"(",
"_get_lib",
"(",
")",
",",
"avail",
")",
")",
"_fix_style",
"(",
"style",
",",
"ax",
",",
"*",
"*",
"kwargs",
")"
] | Add an extra formatting layer to an axes that couldn't be changed directly
in matplotlib.rcParams or with styles. Apply this function to every axes
you create.
Parameters
----------
ax: a matplotlib axes.
If None, the last axes generated is used
style: string or list of string
['basic', 'article', 'poster', 'B&W','talk','origin']
one of the styles previously defined. It should match the style you
chose in set_style but nothing forces you to.
kwargs: dict
edit any of the style_params keys. ex:
>>> tight_layout=False
Examples
--------
plb.set_style('poster')
plt.plot(a,np.cos(a))
plb.fix_style('poster',**{'draggable_legend':False})
See Also
--------
:func:`~publib.publib.set_style`
:func:`~publib.tools.tools.reset_defaults` | [
"Add",
"an",
"extra",
"formatting",
"layer",
"to",
"an",
"axe",
"that",
"couldn",
"t",
"be",
"changed",
"directly",
"in",
"matplotlib",
".",
"rcParams",
"or",
"with",
"styles",
".",
"Apply",
"this",
"function",
"to",
"every",
"axe",
"you",
"created",
"."
] | 0417e6a31d52e23b816ac74d40b4c11d4b8ba4a6 | https://github.com/erwanp/publib/blob/0417e6a31d52e23b816ac74d40b4c11d4b8ba4a6/publib/main.py#L147-L192 | train |
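The full workflow from the docstring, expanded into a runnable sketch (publib is assumed importable as plb, mirroring the examples):

import numpy as np
import matplotlib.pyplot as plt
import publib as plb

plb.set_style('poster')              # before plotting
a = np.linspace(0, 2 * np.pi, 100)
plt.plot(a, np.cos(a))
plb.fix_style('poster', **{'draggable_legend': False})  # after each axes
plt.savefig('cosine.png')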
CitrineInformatics/pif-dft | dfttopif/parsers/abinit.py | AbinitParser._get_label | def _get_label(self):
'''Find the label for the output files
for this calculation
'''
if self._label is None:
foundfiles = False
for f in self._files:
if ".files" in f:
foundfiles = True
self._label = f.split(".")[0]
with open(self._label + '.files', 'r') as fp:
line = fp.readline().split()[0]
if line != self._label + ".in":
fp.close()
raise Exception('first line must be label.in')
line = fp.readline().split()[0]
if line != self._label + ".txt":
fp.close()
raise Exception('second line must be label.txt')
line = fp.readline().split()[0]
if line != self._label + "i":
fp.close()
raise Exception('third line must be labeli')
line = fp.readline().split()[0]
if line != self._label + "o":
fp.close()
raise Exception('fourth line must be labelo')
fp.close()
if foundfiles:
return self._label
else:
raise Exception('label.files not found')
#ASE format
# (self.prefix + '.in') # input
# (self.prefix + '.txt')# output
# (self.prefix + 'i') # input
# (self.prefix + 'o') # output
else:
return self._label | python | def _get_label(self):
'''Find the label for the output files
for this calculation
'''
if self._label is None:
foundfiles = False
for f in self._files:
if ".files" in f:
foundfiles = True
self._label = f.split(".")[0]
with open(self._label + '.files', 'r') as fp:
line = fp.readline().split()[0]
if line != self._label + ".in":
fp.close()
raise Exception('first line must be label.in')
line = fp.readline().split()[0]
if line != self._label + ".txt":
fp.close()
raise Exception('second line must be label.txt')
line = fp.readline().split()[0]
if line != self._label + "i":
fp.close()
raise Exception('third line must be labeli')
line = fp.readline().split()[0]
if line != self._label + "o":
fp.close()
raise Exception('fourth line must be labelo')
fp.close()
if foundfiles:
return self._label
else:
raise Exception('label.files not found')
#ASE format
# (self.prefix + '.in') # input
# (self.prefix + '.txt')# output
# (self.prefix + 'i') # input
# (self.prefix + 'o') # output
else:
return self._label | [
"def",
"_get_label",
"(",
"self",
")",
":",
"if",
"self",
".",
"_label",
"is",
"None",
":",
"foundfiles",
"=",
"False",
"for",
"f",
"in",
"self",
".",
"_files",
":",
"if",
"\".files\"",
"in",
"f",
":",
"foundfiles",
"=",
"True",
"self",
".",
"_label",
"=",
"f",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"with",
"open",
"(",
"self",
".",
"_label",
"+",
"'.files'",
",",
"'r'",
")",
"as",
"fp",
":",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"line",
"!=",
"self",
".",
"_label",
"+",
"\".in\"",
":",
"fp",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"'first line must be label.in'",
")",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"line",
"!=",
"self",
".",
"_label",
"+",
"\".txt\"",
":",
"fp",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"'second line must be label.txt'",
")",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"line",
"!=",
"self",
".",
"_label",
"+",
"\"i\"",
":",
"fp",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"'third line must be labeli'",
")",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"line",
"!=",
"self",
".",
"_label",
"+",
"\"o\"",
":",
"fp",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"'fourth line must be labelo'",
")",
"fp",
".",
"close",
"(",
")",
"if",
"foundfiles",
":",
"return",
"self",
".",
"_label",
"else",
":",
"raise",
"Exception",
"(",
"'label.files not found'",
")",
"#ASE format",
"# (self.prefix + '.in') # input",
"# (self.prefix + '.txt')# output",
"# (self.prefix + 'i') # input",
"# (self.prefix + 'o') # output",
"else",
":",
"return",
"self",
".",
"_label"
] | Find the label for the output files
for this calculation | [
"Find",
"the",
"label",
"for",
"the",
"output",
"files",
"for",
"this",
"calculation"
] | d5411dc1f6c6e8d454b132977ca7ab3bb8131a80 | https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/abinit.py#L30-L70 | train |
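The validation above pins down the expected layout of the .files driver file: its first four lines must be label.in, label.txt, labeli and labelo. A sketch that writes a conforming file for a made-up label:

label = 'mycalc'  # placeholder label
with open(label + '.files', 'w') as fp:
    fp.write('\n'.join([label + '.in', label + '.txt',
                        label + 'i', label + 'o']) + '\n')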
rosshamish/catan-py | catan/states.py | GameStateInGame.next_player | def next_player(self):
"""
Returns the player whose turn it will be next.
Uses regular seat-wise clockwise rotation.
Compare to GameStatePreGame's implementation, which uses snake draft.
:return Player
"""
logging.warning('turn={}, players={}'.format(
self.game._cur_turn,
self.game.players
))
return self.game.players[(self.game._cur_turn + 1) % len(self.game.players)] | python | def next_player(self):
"""
Returns the player whose turn it will be next.
Uses regular seat-wise clockwise rotation.
Compare to GameStatePreGame's implementation, which uses snake draft.
:return Player
"""
logging.warning('turn={}, players={}'.format(
self.game._cur_turn,
self.game.players
))
return self.game.players[(self.game._cur_turn + 1) % len(self.game.players)] | [
"def",
"next_player",
"(",
"self",
")",
":",
"logging",
".",
"warning",
"(",
"'turn={}, players={}'",
".",
"format",
"(",
"self",
".",
"game",
".",
"_cur_turn",
",",
"self",
".",
"game",
".",
"players",
")",
")",
"return",
"self",
".",
"game",
".",
"players",
"[",
"(",
"self",
".",
"game",
".",
"_cur_turn",
"+",
"1",
")",
"%",
"len",
"(",
"self",
".",
"game",
".",
"players",
")",
"]"
] | Returns the player whose turn it will be next.
Uses regular seat-wise clockwise rotation.
Compare to GameStatePreGame's implementation, which uses snake draft.
:return Player | [
"Returns",
"the",
"player",
"whose",
"turn",
"it",
"will",
"be",
"next",
"."
] | 120438a8f16e39c13322c5d5930e1064e1d3f4be | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/states.py#L184-L198 | train |
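The rotation is plain modular arithmetic; a tiny check with made-up seat names:

players = ['alice', 'bob', 'carol', 'dave']
cur_turn = 3  # dave just played
# (3 + 1) % 4 == 0 wraps around to the first seat.
assert players[(cur_turn + 1) % len(players)] == 'alice'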
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/script_iterator.py | ScriptIterator._estimate_progress | def _estimate_progress(self):
"""
estimates the current progress that is then used in _receive_signal
:return: current progress in percent
"""
estimate = True
# ==== get the current subscript and the time it takes to execute it =====
current_subscript = self._current_subscript_stage['current_subscript']
# ==== get the number of subscripts =====
num_subscripts = len(self.scripts)
# ==== get number of iterations and loop index ======================
if self.iterator_type == 'loop':
num_iterations = self.settings['num_loops']
elif self.iterator_type == 'sweep':
sweep_range = self.settings['sweep_range']
if self.settings['stepping_mode'] == 'value_step':
num_iterations = int((sweep_range['max_value'] - sweep_range['min_value']) / sweep_range['N/value_step']) + 1
# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],
# (sweep_range['max_value'] - sweep_range['min_value']) /
# sweep_range['N/value_step'] + 1, endpoint=True).tolist())
elif self.settings['stepping_mode'] == 'N':
num_iterations = sweep_range['N/value_step']
else:
raise KeyError('unknown key' + self.settings['stepping_mode'])
else:
print('unknown iterator type in Iterator receive signal - can\'t estimate remaining time')
estimate = False
if estimate:
# get number of loops (completed + 1)
loop_index = self.loop_index
if num_subscripts > 1:
# estimate the progress based on the duration the individual subscripts
loop_execution_time = 0. # time for a single loop execution in s
sub_progress_time = 0. # progress of current loop iteration in s
# ==== get typical duration of current subscript ======================
if current_subscript is not None:
current_subscript_exec_duration = self._current_subscript_stage['subscript_exec_duration'][
current_subscript.name].total_seconds()
else:
current_subscript_exec_duration = 0.0
current_subscript_elapsed_time = (datetime.datetime.now() - current_subscript.start_time).total_seconds()
# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0
if current_subscript_exec_duration == 0.0:
remaining_time = current_subscript.remaining_time.total_seconds()
current_subscript_exec_duration = remaining_time + current_subscript_elapsed_time
# ==== get typical duration of one loop iteration ======================
remaining_scripts = 0  # scripts that remain to be executed for the first time
for subscript_name, duration in self._current_subscript_stage['subscript_exec_duration'].items():
if duration.total_seconds() == 0.0:
remaining_scripts += 1
loop_execution_time += duration.total_seconds()
# add the times of the subscripts that have been executed in the current loop
# ignore the current subscript, because that will be taken care of later
if self._current_subscript_stage['subscript_exec_count'][subscript_name] == loop_index \
and subscript_name is not current_subscript.name:
# this subscript has already been executed in this iteration
sub_progress_time += duration.total_seconds()
# add the proportional duration of the current subscript given by the subscript progress
sub_progress_time += current_subscript_elapsed_time
# if there are scripts that have not been executed yet
# assume that all the scripts that have not been executed yet take as long as the average of the other scripts
if remaining_scripts == num_subscripts:
# none of the subscript has been finished. assume that all the scripts take as long as the first
loop_execution_time = num_subscripts * current_subscript_exec_duration
elif remaining_scripts > 1:
loop_execution_time = 1. * num_subscripts / (num_subscripts - remaining_scripts)
elif remaining_scripts == 1:
# there is only one script left which is the current script
loop_execution_time += current_subscript_exec_duration
if loop_execution_time > 0:
progress_subscript = 100. * sub_progress_time / loop_execution_time
else:
progress_subscript = 1. * progress_subscript / num_subscripts
# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))
progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations
else:
# if can't estimate the remaining time set to half
progress = 50
return progress | python | def _estimate_progress(self):
"""
estimates the current progress that is then used in _receive_signal
:return: current progress in percent
"""
estimate = True
# ==== get the current subscript and the time it takes to execute it =====
current_subscript = self._current_subscript_stage['current_subscript']
# ==== get the number of subscripts =====
num_subscripts = len(self.scripts)
# ==== get number of iterations and loop index ======================
if self.iterator_type == 'loop':
num_iterations = self.settings['num_loops']
elif self.iterator_type == 'sweep':
sweep_range = self.settings['sweep_range']
if self.settings['stepping_mode'] == 'value_step':
num_iterations = int((sweep_range['max_value'] - sweep_range['min_value']) / sweep_range['N/value_step']) + 1
# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],
# (sweep_range['max_value'] - sweep_range['min_value']) /
# sweep_range['N/value_step'] + 1, endpoint=True).tolist())
elif self.settings['stepping_mode'] == 'N':
num_iterations = sweep_range['N/value_step']
else:
raise KeyError('unknown key' + self.settings['stepping_mode'])
else:
print('unknown iterator type in Iterator receive signal - can\'t estimate remaining time')
estimate = False
if estimate:
# get number of loops (completed + 1)
loop_index = self.loop_index
if num_subscripts > 1:
# estimate the progress based on the duration the individual subscripts
loop_execution_time = 0. # time for a single loop execution in s
sub_progress_time = 0. # progress of current loop iteration in s
# ==== get typical duration of current subscript ======================
if current_subscript is not None:
current_subscript_exec_duration = self._current_subscript_stage['subscript_exec_duration'][
current_subscript.name].total_seconds()
else:
current_subscript_exec_duration = 0.0
current_subscript_elapsed_time = (datetime.datetime.now() - current_subscript.start_time).total_seconds()
# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0
if current_subscript_exec_duration == 0.0:
remaining_time = current_subscript.remaining_time.total_seconds()
current_subscript_exec_duration = remaining_time + current_subscript_elapsed_time
# ==== get typical duration of one loop iteration ======================
remaining_scripts = 0  # scripts that remain to be executed for the first time
for subscript_name, duration in self._current_subscript_stage['subscript_exec_duration'].items():
if duration.total_seconds() == 0.0:
remaining_scripts += 1
loop_execution_time += duration.total_seconds()
# add the times of the subscripts that have been executed in the current loop
# ignore the current subscript, because that will be taken care of later
if self._current_subscript_stage['subscript_exec_count'][subscript_name] == loop_index \
and subscript_name is not current_subscript.name:
# this subscript has already been executed in this iteration
sub_progress_time += duration.total_seconds()
# add the proportional duration of the current subscript given by the subscript progress
sub_progress_time += current_subscript_elapsed_time
# if there are scripts that have not been executed yet
# assume that all the scripts that have not been executed yet take as long as the average of the other scripts
if remaining_scripts == num_subscripts:
# none of the subscript has been finished. assume that all the scripts take as long as the first
loop_execution_time = num_subscripts * current_subscript_exec_duration
elif remaining_scripts > 1:
loop_execution_time = 1. * num_subscripts / (num_subscripts - remaining_scripts)
elif remaining_scripts == 1:
# there is only one script left which is the current script
loop_execution_time += current_subscript_exec_duration
if loop_execution_time > 0:
progress_subscript = 100. * sub_progress_time / loop_execution_time
else:
progress_subscript = 1. * progress_subscript / num_subscripts
# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))
progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations
else:
# if can't estimate the remaining time set to half
progress = 50
return progress | [
"def",
"_estimate_progress",
"(",
"self",
")",
":",
"estimate",
"=",
"True",
"# ==== get the current subscript and the time it takes to execute it =====",
"current_subscript",
"=",
"self",
".",
"_current_subscript_stage",
"[",
"'current_subscript'",
"]",
"# ==== get the number of subscripts =====",
"num_subscripts",
"=",
"len",
"(",
"self",
".",
"scripts",
")",
"# ==== get number of iterations and loop index ======================",
"if",
"self",
".",
"iterator_type",
"==",
"'loop'",
":",
"num_iterations",
"=",
"self",
".",
"settings",
"[",
"'num_loops'",
"]",
"elif",
"self",
".",
"iterator_type",
"==",
"'sweep'",
":",
"sweep_range",
"=",
"self",
".",
"settings",
"[",
"'sweep_range'",
"]",
"if",
"self",
".",
"settings",
"[",
"'stepping_mode'",
"]",
"==",
"'value_step'",
":",
"num_iterations",
"=",
"int",
"(",
"(",
"sweep_range",
"[",
"'max_value'",
"]",
"-",
"sweep_range",
"[",
"'min_value'",
"]",
")",
"/",
"sweep_range",
"[",
"'N/value_step'",
"]",
")",
"+",
"1",
"# len(np.linspace(sweep_range['min_value'], sweep_range['max_value'],",
"# (sweep_range['max_value'] - sweep_range['min_value']) /",
"# sweep_range['N/value_step'] + 1, endpoint=True).tolist())",
"elif",
"self",
".",
"settings",
"[",
"'stepping_mode'",
"]",
"==",
"'N'",
":",
"num_iterations",
"=",
"sweep_range",
"[",
"'N/value_step'",
"]",
"else",
":",
"raise",
"KeyError",
"(",
"'unknown key'",
"+",
"self",
".",
"settings",
"[",
"'stepping_mode'",
"]",
")",
"else",
":",
"print",
"(",
"'unknown iterator type in Iterator receive signal - can\\'t estimate ramining time'",
")",
"estimate",
"=",
"False",
"if",
"estimate",
":",
"# get number of loops (completed + 1)",
"loop_index",
"=",
"self",
".",
"loop_index",
"if",
"num_subscripts",
">",
"1",
":",
"# estimate the progress based on the duration the individual subscripts",
"loop_execution_time",
"=",
"0.",
"# time for a single loop execution in s",
"sub_progress_time",
"=",
"0.",
"# progress of current loop iteration in s",
"# ==== get typical duration of current subscript ======================",
"if",
"current_subscript",
"is",
"not",
"None",
":",
"current_subscript_exec_duration",
"=",
"self",
".",
"_current_subscript_stage",
"[",
"'subscript_exec_duration'",
"]",
"[",
"current_subscript",
".",
"name",
"]",
".",
"total_seconds",
"(",
")",
"else",
":",
"current_subscript_exec_duration",
"=",
"0.0",
"current_subscript_elapsed_time",
"=",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"current_subscript",
".",
"start_time",
")",
".",
"total_seconds",
"(",
")",
"# estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0",
"if",
"current_subscript_exec_duration",
"==",
"0.0",
":",
"remaining_time",
"=",
"current_subscript",
".",
"remaining_time",
".",
"total_seconds",
"(",
")",
"current_subscript_exec_duration",
"=",
"remaining_time",
"+",
"current_subscript_elapsed_time",
"# ==== get typical duration of one loop iteration ======================",
"remaining_scripts",
"=",
"0",
"# script that remain to be executed for the first time",
"for",
"subscript_name",
",",
"duration",
"in",
"self",
".",
"_current_subscript_stage",
"[",
"'subscript_exec_duration'",
"]",
".",
"items",
"(",
")",
":",
"if",
"duration",
".",
"total_seconds",
"(",
")",
"==",
"0.0",
":",
"remaining_scripts",
"+=",
"1",
"loop_execution_time",
"+=",
"duration",
".",
"total_seconds",
"(",
")",
"# add the times of the subscripts that have been executed in the current loop",
"# ignore the current subscript, because that will be taken care of later",
"if",
"self",
".",
"_current_subscript_stage",
"[",
"'subscript_exec_count'",
"]",
"[",
"subscript_name",
"]",
"==",
"loop_index",
"and",
"subscript_name",
"is",
"not",
"current_subscript",
".",
"name",
":",
"# this subscript has already been executed in this iteration",
"sub_progress_time",
"+=",
"duration",
".",
"total_seconds",
"(",
")",
"# add the proportional duration of the current subscript given by the subscript progress",
"sub_progress_time",
"+=",
"current_subscript_elapsed_time",
"# if there are scripts that have not been executed yet",
"# assume that all the scripts that have not been executed yet take as long as the average of the other scripts",
"if",
"remaining_scripts",
"==",
"num_subscripts",
":",
"# none of the subscript has been finished. assume that all the scripts take as long as the first",
"loop_execution_time",
"=",
"num_subscripts",
"*",
"current_subscript_exec_duration",
"elif",
"remaining_scripts",
">",
"1",
":",
"loop_execution_time",
"=",
"1.",
"*",
"num_subscripts",
"/",
"(",
"num_subscripts",
"-",
"remaining_scripts",
")",
"elif",
"remaining_scripts",
"==",
"1",
":",
"# there is only one script left which is the current script",
"loop_execution_time",
"+=",
"current_subscript_exec_duration",
"if",
"loop_execution_time",
">",
"0",
":",
"progress_subscript",
"=",
"100.",
"*",
"sub_progress_time",
"/",
"loop_execution_time",
"else",
":",
"progress_subscript",
"=",
"1.",
"*",
"progress_subscript",
"/",
"num_subscripts",
"# print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript))",
"progress",
"=",
"100.",
"*",
"(",
"loop_index",
"-",
"1.",
"+",
"0.01",
"*",
"progress_subscript",
")",
"/",
"num_iterations",
"else",
":",
"# if can't estimate the remaining time set to half",
"progress",
"=",
"50",
"return",
"progress"
] | estimates the current progress that is then used in _receive_signal
:return: current progress in percent | [
"estimates",
"the",
"current",
"progress",
"that",
"is",
"then",
"used",
"in",
"_receive_signal"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/script_iterator.py#L242-L338 | train |
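The final formula above folds the completed loops and the percent progress of the current iteration into one number. A minimal, self-contained sketch of just that arithmetic (function and variable names here are illustrative, not part of the pylabcontrol API):

# Sketch: combine completed loops with the current iteration's sub-progress
def overall_progress(loop_index, num_iterations, progress_subscript):
    # loop_index counts completed loops + 1; progress_subscript is in [0, 100]
    return 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations

print(overall_progress(3, 10, 50.0))  # -> 25.0 (2.5 of 10 iterations done)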
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/script_iterator.py | ScriptIterator.plot | def plot(self, figure_list):
'''
When each subscript is called, uses its standard plotting
Args:
figure_list: list of figures passed from the GUI
'''
#TODO: be smarter about how we plot ScriptIterator
if self._current_subscript_stage is not None:
if self._current_subscript_stage['current_subscript'] is not None:
self._current_subscript_stage['current_subscript'].plot(figure_list)
if (self.is_running is False) and not (self.data == {} or self.data is None):
script_names = list(self.settings['script_order'].keys())
script_indices = [self.settings['script_order'][name] for name in script_names]
_, sorted_script_names = list(zip(*sorted(zip(script_indices, script_names))))
last_script = self.scripts[sorted_script_names[-1]]
last_script.force_update() # since we use the last script plot function we force it to refresh
axes_list = last_script.get_axes_layout(figure_list)
# catch error if the _plot function doesn't take an optional data argument
try:
last_script._plot(axes_list, self.data)
except TypeError as err:
print((warnings.warn('can\'t plot average script data because script.plot function doesn\'t take data as optional argument. Plotting last data set instead')))
print(err)
last_script.plot(figure_list) | python | def plot(self, figure_list):
'''
When each subscript is called, uses its standard plotting
Args:
figure_list: list of figures passed from the GUI
'''
#TODO: be smarter about how we plot ScriptIterator
if self._current_subscript_stage is not None:
if self._current_subscript_stage['current_subscript'] is not None:
self._current_subscript_stage['current_subscript'].plot(figure_list)
if (self.is_running is False) and not (self.data == {} or self.data is None):
script_names = list(self.settings['script_order'].keys())
script_indices = [self.settings['script_order'][name] for name in script_names]
_, sorted_script_names = list(zip(*sorted(zip(script_indices, script_names))))
last_script = self.scripts[sorted_script_names[-1]]
last_script.force_update() # since we use the last script plot function we force it to refresh
axes_list = last_script.get_axes_layout(figure_list)
# catch error if the _plot function doesn't take an optional data argument
try:
last_script._plot(axes_list, self.data)
except TypeError as err:
print((warnings.warn('can\'t plot average script data because script.plot function doesn\'t take data as optional argument. Plotting last data set instead')))
print(err)
last_script.plot(figure_list) | [
"def",
"plot",
"(",
"self",
",",
"figure_list",
")",
":",
"#TODO: be smarter about how we plot ScriptIterator",
"if",
"self",
".",
"_current_subscript_stage",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_current_subscript_stage",
"[",
"'current_subscript'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_current_subscript_stage",
"[",
"'current_subscript'",
"]",
".",
"plot",
"(",
"figure_list",
")",
"if",
"(",
"self",
".",
"is_running",
"is",
"False",
")",
"and",
"not",
"(",
"self",
".",
"data",
"==",
"{",
"}",
"or",
"self",
".",
"data",
"is",
"None",
")",
":",
"script_names",
"=",
"list",
"(",
"self",
".",
"settings",
"[",
"'script_order'",
"]",
".",
"keys",
"(",
")",
")",
"script_indices",
"=",
"[",
"self",
".",
"settings",
"[",
"'script_order'",
"]",
"[",
"name",
"]",
"for",
"name",
"in",
"script_names",
"]",
"_",
",",
"sorted_script_names",
"=",
"list",
"(",
"zip",
"(",
"*",
"sorted",
"(",
"zip",
"(",
"script_indices",
",",
"script_names",
")",
")",
")",
")",
"last_script",
"=",
"self",
".",
"scripts",
"[",
"sorted_script_names",
"[",
"-",
"1",
"]",
"]",
"last_script",
".",
"force_update",
"(",
")",
"# since we use the last script plot function we force it to refresh",
"axes_list",
"=",
"last_script",
".",
"get_axes_layout",
"(",
"figure_list",
")",
"# catch error is _plot function doens't take optional data argument",
"try",
":",
"last_script",
".",
"_plot",
"(",
"axes_list",
",",
"self",
".",
"data",
")",
"except",
"TypeError",
"as",
"err",
":",
"print",
"(",
"(",
"warnings",
".",
"warn",
"(",
"'can\\'t plot average script data because script.plot function doens\\'t take data as optional argument. Plotting last data set instead'",
")",
")",
")",
"print",
"(",
"(",
"err",
".",
"message",
")",
")",
"last_script",
".",
"plot",
"(",
"figure_list",
")"
] | When each subscript is called, uses its standard plotting
Args:
figure_list: list of figures passed from the GUI | [
"When",
"each",
"subscript",
"is",
"called",
"uses",
"its",
"standard",
"plotting"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/script_iterator.py#L360-L392 | train |
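The fallback logic in plot() hinges on catching TypeError when a subscript's _plot does not accept the optional data argument. The same duck-typing pattern in isolation (the plotter object here is hypothetical, not a real Script):

def plot_with_fallback(plotter, axes_list, data):
    # Prefer the richer signature; fall back if _plot rejects the data argument
    try:
        plotter._plot(axes_list, data)
    except TypeError:
        print("warning: _plot takes no data argument, plotting the last data set")
        plotter.plot(axes_list)

One caveat of this pattern: a TypeError raised inside a _plot that does accept data is swallowed too, which is why the original prints the exception before falling back.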
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/script_iterator.py | ScriptIterator.get_default_settings | def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):
"""
assigning the actual script settings depending on the iterator type
this might be overridden by classes that inherit from ScriptIterator
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator
"""
def populate_sweep_param(scripts, parameter_list, trace=''):
'''
Args:
scripts: a dict of {'class name': <class object>} pairs
Returns: A list of all parameters of the input scripts
'''
def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):
"""
appends keys in the dict to a list in the form trace.key.subkey.subsubkey...
Args:
trace: initial prefix (path through scripts and parameters to current location)
dic: dictionary
parameter_list: list to which the parameters are appended
valid_values: valid values of the dictionary values; if None, dic should be a Parameter
Returns:
"""
if valid_values is None and isinstance(dic, Parameter):
valid_values = dic.valid_values
for key, value in dic.items():
if isinstance(value, dict): # for nested parameters ex {point: {'x': int, 'y': int}}
parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,
dic.valid_values[key])
elif (valid_values[key] in (float, int)) or \
(isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):
parameter_list.append(trace + '.' + key)
else: # once down to the form {key: value}
# in all other cases ignore parameter
print(('ignoring sweep parameter', key))
return parameter_list
for script_name in list(scripts.keys()):
from pylabcontrol.core import ScriptIterator
script_trace = trace
if script_trace == '':
script_trace = script_name
else:
script_trace = script_trace + '->' + script_name
if issubclass(scripts[script_name], ScriptIterator): # gets subscripts of ScriptIterator objects
populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,
trace=script_trace)
else:
# use inspect instead of vars to get _DEFAULT_SETTINGS also for classes that inherit _DEFAULT_SETTINGS from a superclass
for setting in \
[elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:
parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)
return parameter_list
if iterator_type == 'loop':
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('num_loops', 0, int, 'times the subscripts will be executed'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
elif iterator_type == 'sweep':
sweep_params = populate_sweep_param(sub_scripts, [])
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),
Parameter('sweep_range',
[Parameter('min_value', 0, float, 'min parameter value'),
Parameter('max_value', 0, float, 'max parameter value'),
Parameter('N/value_step', 0, float,
'either number of steps or parameter value step, depending on mode')]),
Parameter('stepping_mode', 'N', ['N', 'value_step'],
'Switch between number of steps and step amount'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
else:
print(('unknown iterator type ' + iterator_type))
raise TypeError('unknown iterator type ' + iterator_type)
return script_default_settings | python | def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):
"""
assigning the actual script settings depending on the iterator type
this might be overridden by classes that inherit from ScriptIterator
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator
"""
def populate_sweep_param(scripts, parameter_list, trace=''):
'''
Args:
scripts: a dict of {'class name': <class object>} pairs
Returns: A list of all parameters of the input scripts
'''
def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):
"""
appends keys in the dict to a list in the form trace.key.subkey.subsubkey...
Args:
trace: initial prefix (path through scripts and parameters to current location)
dic: dictionary
parameter_list: list to which the parameters are appended
valid_values: valid values of the dictionary values; if None, dic should be a Parameter
Returns:
"""
if valid_values is None and isinstance(dic, Parameter):
valid_values = dic.valid_values
for key, value in dic.items():
if isinstance(value, dict): # for nested parameters ex {point: {'x': int, 'y': int}}
parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,
dic.valid_values[key])
elif (valid_values[key] in (float, int)) or \
(isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):
parameter_list.append(trace + '.' + key)
else: # once down to the form {key: value}
# in all other cases ignore parameter
print(('ignoring sweep parameter', key))
return parameter_list
for script_name in list(scripts.keys()):
from pylabcontrol.core import ScriptIterator
script_trace = trace
if script_trace == '':
script_trace = script_name
else:
script_trace = script_trace + '->' + script_name
if issubclass(scripts[script_name], ScriptIterator): # gets subscripts of ScriptIterator objects
populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,
trace=script_trace)
else:
# use inspect instead of vars to get _DEFAULT_SETTINGS also for classes that inherit _DEFAULT_SETTINGS from a superclass
for setting in \
[elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:
parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)
return parameter_list
if iterator_type == 'loop':
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('num_loops', 0, int, 'times the subscripts will be executed'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
elif iterator_type == 'sweep':
sweep_params = populate_sweep_param(sub_scripts, [])
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),
Parameter('sweep_range',
[Parameter('min_value', 0, float, 'min parameter value'),
Parameter('max_value', 0, float, 'max parameter value'),
Parameter('N/value_step', 0, float,
'either number of steps or parameter value step, depending on mode')]),
Parameter('stepping_mode', 'N', ['N', 'value_step'],
'Switch between number of steps and step amount'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
else:
print(('unknown iterator type ' + iterator_type))
raise TypeError('unknown iterator type ' + iterator_type)
return script_default_settings | [
"def",
"get_default_settings",
"(",
"sub_scripts",
",",
"script_order",
",",
"script_execution_freq",
",",
"iterator_type",
")",
":",
"def",
"populate_sweep_param",
"(",
"scripts",
",",
"parameter_list",
",",
"trace",
"=",
"''",
")",
":",
"'''\n\n Args:\n scripts: a dict of {'class name': <class object>} pairs\n\n Returns: A list of all parameters of the input scripts\n\n '''",
"def",
"get_parameter_from_dict",
"(",
"trace",
",",
"dic",
",",
"parameter_list",
",",
"valid_values",
"=",
"None",
")",
":",
"\"\"\"\n appends keys in the dict to a list in the form trace.key.subkey.subsubkey...\n Args:\n trace: initial prefix (path through scripts and parameters to current location)\n dic: dictionary\n parameter_list: list to which append the parameters\n\n valid_values: valid values of dictionary values if None dic should be a dictionary\n\n Returns:\n\n \"\"\"",
"if",
"valid_values",
"is",
"None",
"and",
"isinstance",
"(",
"dic",
",",
"Parameter",
")",
":",
"valid_values",
"=",
"dic",
".",
"valid_values",
"for",
"key",
",",
"value",
"in",
"dic",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"# for nested parameters ex {point: {'x': int, 'y': int}}",
"parameter_list",
"=",
"get_parameter_from_dict",
"(",
"trace",
"+",
"'.'",
"+",
"key",
",",
"value",
",",
"parameter_list",
",",
"dic",
".",
"valid_values",
"[",
"key",
"]",
")",
"elif",
"(",
"valid_values",
"[",
"key",
"]",
"in",
"(",
"float",
",",
"int",
")",
")",
"or",
"(",
"isinstance",
"(",
"valid_values",
"[",
"key",
"]",
",",
"list",
")",
"and",
"valid_values",
"[",
"key",
"]",
"[",
"0",
"]",
"in",
"(",
"float",
",",
"int",
")",
")",
":",
"parameter_list",
".",
"append",
"(",
"trace",
"+",
"'.'",
"+",
"key",
")",
"else",
":",
"# once down to the form {key: value}",
"# in all other cases ignore parameter",
"print",
"(",
"(",
"'ignoring sweep parameter'",
",",
"key",
")",
")",
"return",
"parameter_list",
"for",
"script_name",
"in",
"list",
"(",
"scripts",
".",
"keys",
"(",
")",
")",
":",
"from",
"pylabcontrol",
".",
"core",
"import",
"ScriptIterator",
"script_trace",
"=",
"trace",
"if",
"script_trace",
"==",
"''",
":",
"script_trace",
"=",
"script_name",
"else",
":",
"script_trace",
"=",
"script_trace",
"+",
"'->'",
"+",
"script_name",
"if",
"issubclass",
"(",
"scripts",
"[",
"script_name",
"]",
",",
"ScriptIterator",
")",
":",
"# gets subscripts of ScriptIterator objects",
"populate_sweep_param",
"(",
"vars",
"(",
"scripts",
"[",
"script_name",
"]",
")",
"[",
"'_SCRIPTS'",
"]",
",",
"parameter_list",
"=",
"parameter_list",
",",
"trace",
"=",
"script_trace",
")",
"else",
":",
"# use inspect instead of vars to get _DEFAULT_SETTINGS also for classes that inherit _DEFAULT_SETTINGS from a superclass",
"for",
"setting",
"in",
"[",
"elem",
"[",
"1",
"]",
"for",
"elem",
"in",
"inspect",
".",
"getmembers",
"(",
"scripts",
"[",
"script_name",
"]",
")",
"if",
"elem",
"[",
"0",
"]",
"==",
"'_DEFAULT_SETTINGS'",
"]",
"[",
"0",
"]",
":",
"parameter_list",
"=",
"get_parameter_from_dict",
"(",
"script_trace",
",",
"setting",
",",
"parameter_list",
")",
"return",
"parameter_list",
"if",
"iterator_type",
"==",
"'loop'",
":",
"script_default_settings",
"=",
"[",
"Parameter",
"(",
"'script_order'",
",",
"script_order",
")",
",",
"Parameter",
"(",
"'script_execution_freq'",
",",
"script_execution_freq",
")",
",",
"Parameter",
"(",
"'num_loops'",
",",
"0",
",",
"int",
",",
"'times the subscripts will be executed'",
")",
",",
"Parameter",
"(",
"'run_all_first'",
",",
"True",
",",
"bool",
",",
"'Run all scripts with nonzero frequency in first pass'",
")",
"]",
"elif",
"iterator_type",
"==",
"'sweep'",
":",
"sweep_params",
"=",
"populate_sweep_param",
"(",
"sub_scripts",
",",
"[",
"]",
")",
"script_default_settings",
"=",
"[",
"Parameter",
"(",
"'script_order'",
",",
"script_order",
")",
",",
"Parameter",
"(",
"'script_execution_freq'",
",",
"script_execution_freq",
")",
",",
"Parameter",
"(",
"'sweep_param'",
",",
"sweep_params",
"[",
"0",
"]",
",",
"sweep_params",
",",
"'variable over which to sweep'",
")",
",",
"Parameter",
"(",
"'sweep_range'",
",",
"[",
"Parameter",
"(",
"'min_value'",
",",
"0",
",",
"float",
",",
"'min parameter value'",
")",
",",
"Parameter",
"(",
"'max_value'",
",",
"0",
",",
"float",
",",
"'max parameter value'",
")",
",",
"Parameter",
"(",
"'N/value_step'",
",",
"0",
",",
"float",
",",
"'either number of steps or parameter value step, depending on mode'",
")",
"]",
")",
",",
"Parameter",
"(",
"'stepping_mode'",
",",
"'N'",
",",
"[",
"'N'",
",",
"'value_step'",
"]",
",",
"'Switch between number of steps and step amount'",
")",
",",
"Parameter",
"(",
"'run_all_first'",
",",
"True",
",",
"bool",
",",
"'Run all scripts with nonzero frequency in first pass'",
")",
"]",
"else",
":",
"print",
"(",
"(",
"'unknown iterator type '",
"+",
"iterator_type",
")",
")",
"raise",
"TypeError",
"(",
"'unknown iterator type '",
"+",
"iterator_type",
")",
"return",
"script_default_settings"
] | assigning the actual script settings depending on the iterator type
this might be overridden by classes that inherit from ScriptIterator
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator | [
"assigning",
"the",
"actual",
"script",
"settings",
"depending",
"on",
"the",
"iterator",
"type"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/script_iterator.py#L451-L552 | train |
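get_parameter_from_dict builds dotted sweep-parameter paths such as 'MyScript.point.x' by recursing through nested settings. The same flattening on plain dicts, stripped of the Parameter class (all names hypothetical):

def flatten_params(trace, dic, out):
    # Collect numeric leaves as dotted paths: trace.key.subkey...
    for key, value in dic.items():
        if isinstance(value, dict):
            flatten_params(trace + '.' + key, value, out)
        elif isinstance(value, (int, float)) and not isinstance(value, bool):
            out.append(trace + '.' + key)
    return out

print(flatten_params('MyScript', {'point': {'x': 1, 'y': 2}, 'tag': 'a'}, []))
# -> ['MyScript.point.x', 'MyScript.point.y']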
lreis2415/PyGeoC | pygeoc/vector.py | VectorUtilClass.raster2shp | def raster2shp(rasterfile, vectorshp, layername=None, fieldname=None,
band_num=1, mask='default'):
"""Convert raster to ESRI shapefile"""
FileClass.remove_files(vectorshp)
FileClass.check_file_exists(rasterfile)
# this allows GDAL to throw Python Exceptions
gdal.UseExceptions()
src_ds = gdal.Open(rasterfile)
if src_ds is None:
print('Unable to open %s' % rasterfile)
sys.exit(1)
try:
srcband = src_ds.GetRasterBand(band_num)
except RuntimeError as e:
# for example, try GetRasterBand(10)
print('Band ( %i ) not found, %s' % (band_num, e))
sys.exit(1)
if mask == 'default':
maskband = srcband.GetMaskBand()
elif mask is None or mask.upper() == 'NONE':
maskband = None
else:
mask_ds = gdal.Open(mask)
maskband = mask_ds.GetRasterBand(1)
# create output datasource
if layername is None:
layername = FileClass.get_core_name_without_suffix(rasterfile)
drv = ogr_GetDriverByName(str('ESRI Shapefile'))
dst_ds = drv.CreateDataSource(vectorshp)
srs = None
if src_ds.GetProjection() != '':
srs = osr_SpatialReference()
srs.ImportFromWkt(src_ds.GetProjection())
dst_layer = dst_ds.CreateLayer(str(layername), srs=srs)
if fieldname is None:
fieldname = layername.upper()
fd = ogr_FieldDefn(str(fieldname), OFTInteger)
dst_layer.CreateField(fd)
dst_field = 0
result = gdal.Polygonize(srcband, maskband, dst_layer, dst_field,
['8CONNECTED=8'], callback=None)
return result | python | def raster2shp(rasterfile, vectorshp, layername=None, fieldname=None,
band_num=1, mask='default'):
"""Convert raster to ESRI shapefile"""
FileClass.remove_files(vectorshp)
FileClass.check_file_exists(rasterfile)
# this allows GDAL to throw Python Exceptions
gdal.UseExceptions()
src_ds = gdal.Open(rasterfile)
if src_ds is None:
print('Unable to open %s' % rasterfile)
sys.exit(1)
try:
srcband = src_ds.GetRasterBand(band_num)
except RuntimeError as e:
# for example, try GetRasterBand(10)
print('Band ( %i ) not found, %s' % (band_num, e))
sys.exit(1)
if mask == 'default':
maskband = srcband.GetMaskBand()
elif mask is None or mask.upper() == 'NONE':
maskband = None
else:
mask_ds = gdal.Open(mask)
maskband = mask_ds.GetRasterBand(1)
# create output datasource
if layername is None:
layername = FileClass.get_core_name_without_suffix(rasterfile)
drv = ogr_GetDriverByName(str('ESRI Shapefile'))
dst_ds = drv.CreateDataSource(vectorshp)
srs = None
if src_ds.GetProjection() != '':
srs = osr_SpatialReference()
srs.ImportFromWkt(src_ds.GetProjection())
dst_layer = dst_ds.CreateLayer(str(layername), srs=srs)
if fieldname is None:
fieldname = layername.upper()
fd = ogr_FieldDefn(str(fieldname), OFTInteger)
dst_layer.CreateField(fd)
dst_field = 0
result = gdal.Polygonize(srcband, maskband, dst_layer, dst_field,
['8CONNECTED=8'], callback=None)
return result | [
"def",
"raster2shp",
"(",
"rasterfile",
",",
"vectorshp",
",",
"layername",
"=",
"None",
",",
"fieldname",
"=",
"None",
",",
"band_num",
"=",
"1",
",",
"mask",
"=",
"'default'",
")",
":",
"FileClass",
".",
"remove_files",
"(",
"vectorshp",
")",
"FileClass",
".",
"check_file_exists",
"(",
"rasterfile",
")",
"# this allows GDAL to throw Python Exceptions",
"gdal",
".",
"UseExceptions",
"(",
")",
"src_ds",
"=",
"gdal",
".",
"Open",
"(",
"rasterfile",
")",
"if",
"src_ds",
"is",
"None",
":",
"print",
"(",
"'Unable to open %s'",
"%",
"rasterfile",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"srcband",
"=",
"src_ds",
".",
"GetRasterBand",
"(",
"band_num",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"# for example, try GetRasterBand(10)",
"print",
"(",
"'Band ( %i ) not found, %s'",
"%",
"(",
"band_num",
",",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"mask",
"==",
"'default'",
":",
"maskband",
"=",
"srcband",
".",
"GetMaskBand",
"(",
")",
"elif",
"mask",
"is",
"None",
"or",
"mask",
".",
"upper",
"(",
")",
"==",
"'NONE'",
":",
"maskband",
"=",
"None",
"else",
":",
"mask_ds",
"=",
"gdal",
".",
"Open",
"(",
"mask",
")",
"maskband",
"=",
"mask_ds",
".",
"GetRasterBand",
"(",
"1",
")",
"# create output datasource",
"if",
"layername",
"is",
"None",
":",
"layername",
"=",
"FileClass",
".",
"get_core_name_without_suffix",
"(",
"rasterfile",
")",
"drv",
"=",
"ogr_GetDriverByName",
"(",
"str",
"(",
"'ESRI Shapefile'",
")",
")",
"dst_ds",
"=",
"drv",
".",
"CreateDataSource",
"(",
"vectorshp",
")",
"srs",
"=",
"None",
"if",
"src_ds",
".",
"GetProjection",
"(",
")",
"!=",
"''",
":",
"srs",
"=",
"osr_SpatialReference",
"(",
")",
"srs",
".",
"ImportFromWkt",
"(",
"src_ds",
".",
"GetProjection",
"(",
")",
")",
"dst_layer",
"=",
"dst_ds",
".",
"CreateLayer",
"(",
"str",
"(",
"layername",
")",
",",
"srs",
"=",
"srs",
")",
"if",
"fieldname",
"is",
"None",
":",
"fieldname",
"=",
"layername",
".",
"upper",
"(",
")",
"fd",
"=",
"ogr_FieldDefn",
"(",
"str",
"(",
"fieldname",
")",
",",
"OFTInteger",
")",
"dst_layer",
".",
"CreateField",
"(",
"fd",
")",
"dst_field",
"=",
"0",
"result",
"=",
"gdal",
".",
"Polygonize",
"(",
"srcband",
",",
"maskband",
",",
"dst_layer",
",",
"dst_field",
",",
"[",
"'8CONNECTED=8'",
"]",
",",
"callback",
"=",
"None",
")",
"return",
"result"
] | Convert raster to ESRI shapefile | [
"Convert",
"raster",
"to",
"ESRI",
"shapefile"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/vector.py#L36-L77 | train |
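A usage sketch for raster2shp; the file and field names below are hypothetical, and the call assumes GDAL/OGR is installed and the raster carries projection info:

# Polygonize a (hypothetical) subbasin raster into an ESRI shapefile
result = VectorUtilClass.raster2shp('subbasin.tif', 'subbasin.shp',
                                    layername='subbasin', fieldname='SUBBASINID')
if result != 0:  # gdal.Polygonize returns 0 (CE_None) on success
    print('gdal.Polygonize failed with error code %d' % result)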
lreis2415/PyGeoC | pygeoc/vector.py | VectorUtilClass.convert2geojson | def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
"""convert shapefile to geojson file"""
if os.path.exists(jsonfile):
os.remove(jsonfile)
if sysstr == 'Windows':
exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
else:
exepath = FileClass.get_executable_fullpath('ogr2ogr')
# os.system(s)
s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
exepath, src_srs, dst_srs, jsonfile, src_file)
UtilClass.run_command(s) | python | def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
"""convert shapefile to geojson file"""
if os.path.exists(jsonfile):
os.remove(jsonfile)
if sysstr == 'Windows':
exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
else:
exepath = FileClass.get_executable_fullpath('ogr2ogr')
# os.system(s)
s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
exepath, src_srs, dst_srs, jsonfile, src_file)
UtilClass.run_command(s) | [
"def",
"convert2geojson",
"(",
"jsonfile",
",",
"src_srs",
",",
"dst_srs",
",",
"src_file",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"jsonfile",
")",
":",
"os",
".",
"remove",
"(",
"jsonfile",
")",
"if",
"sysstr",
"==",
"'Windows'",
":",
"exepath",
"=",
"'\"%s/Lib/site-packages/osgeo/ogr2ogr\"'",
"%",
"sys",
".",
"exec_prefix",
"else",
":",
"exepath",
"=",
"FileClass",
".",
"get_executable_fullpath",
"(",
"'ogr2ogr'",
")",
"# os.system(s)",
"s",
"=",
"'%s -f GeoJSON -s_srs \"%s\" -t_srs %s %s %s'",
"%",
"(",
"exepath",
",",
"src_srs",
",",
"dst_srs",
",",
"jsonfile",
",",
"src_file",
")",
"UtilClass",
".",
"run_command",
"(",
"s",
")"
] | convert shapefile to geojson file | [
"convert",
"shapefile",
"to",
"geojson",
"file"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/vector.py#L80-L91 | train |
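A usage sketch for convert2geojson; the paths are hypothetical, the source SRS may be a PROJ.4 string or an EPSG code, and ogr2ogr must be available (on the PATH, or under the osgeo package on Windows):

# Reproject a (hypothetical) shapefile to WGS84 GeoJSON
src_srs = '+proj=longlat +datum=WGS84 +no_defs'  # or e.g. 'EPSG:2435'
VectorUtilClass.convert2geojson('basin.geojson', src_srs, 'EPSG:4326', 'basin.shp')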
etal/biofrills | biofrills/consensus.py | consensus | def consensus(aln, weights=None, gap_threshold=0.5, simple=False, trim_ends=True):
"""Get the consensus of an alignment, as a string.
Emit gap characters for majority-gap columns; apply various strategies to
choose the consensus amino acid type for the remaining columns.
Parameters
----------
simple : bool
If True, use simple plurality to determine the consensus amino acid
type, without weighting sequences for similarity. Otherwise, weight
sequences for similarity and use relative entropy to choose the
consensus amino acid type.
weights : dict or None
Sequence weights. If given, used to calculate amino acid frequencies;
otherwise calculated within this function (i.e. this is a way to speed
up the function if sequence weights have already been calculated).
Ignored in 'simple' mode.
trim_ends : bool
If False, stretch the consensus sequence to include the N- and C-tails
of the alignment, even if those flanking columns are mostly gap
characters. This avoids terminal gaps in the consensus (needed for
MAPGAPS).
gap_threshold : float
If the proportion of gap characters in a column is greater than or equal
to this value (after sequence weighting, if applicable), then the
consensus character emitted will be a gap instead of an amino acid type.
"""
# Choose your algorithms!
if simple:
# Use the simple, unweighted algorithm
col_consensus = make_simple_col_consensus(alnutils.aa_frequencies(aln))
def is_majority_gap(col):
return (float(col.count('-')) / len(col) >= gap_threshold)
# ENH (alternatively/additionally): does any aa occur more than once?
# ENH: choose gap-decisionmaking separately from col_consensus
else:
# Use the entropy-based, weighted algorithm
if weights is None:
seq_weights = alnutils.sequence_weights(aln, 'avg1')
else:
seq_weights = weights
aa_frequencies = alnutils.aa_frequencies(aln, weights=seq_weights)
col_consensus = make_entropy_col_consensus(aa_frequencies)
def is_majority_gap(col):
gap_count = 0.0
for wt, char in zip(seq_weights, col):
if char == '-':
gap_count += wt
return (gap_count / sum(seq_weights) >= gap_threshold)
# Traverse the alignment, handling gaps etc.
def col_wise_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
if not trim_ends:
# Track if we're in the N-term or C-term end of the sequence
in_left_end = True
maybe_right_tail = []
# prev_col = None
# prev_char = None
for col in columns:
# Lowercase cols mean explicitly, "don't include in consensus"
if all(c.islower() for c in col if c not in '.-'):
yield '-'
continue
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Gap chars
is_gap = is_majority_gap(col)
if not trim_ends:
# Avoid N-terminal gaps in the consensus sequence
if in_left_end:
if not is_gap:
# Match -- we're no longer in the left end
in_left_end = False
is_gap = False
# When to yield a gap here:
# ----------- --------- ------ ----------
# in_left_end trim_ends is_gap yield gap?
# ----------- --------- ------ ----------
# True True (True) yes
# True False (False) (no -- def. char)
# False True T/F yes, if is_gap
# False False (T/F) NO! use maybe_right_tail
# ----------- --------- ------ ----------
if is_gap and trim_ends:
yield '-'
continue
# Get the consensus character, using the chosen algorithm
cons_char = col_consensus(col)
if trim_ends:
yield cons_char
else:
# Avoid C-terminal gaps in the consensus sequence
if is_gap:
maybe_right_tail.append(cons_char)
else:
# Match -> gaps weren't the right tail; emit all gaps
for char in maybe_right_tail:
yield '-'
maybe_right_tail = []
yield cons_char
# prev_col = col
# prev_char = cons_char
# Finally, if we were keeping a right (C-term) tail, emit it
if not trim_ends:
for char in maybe_right_tail:
yield char
return ''.join(col_wise_consensus(zip(*aln))) | python | def consensus(aln, weights=None, gap_threshold=0.5, simple=False, trim_ends=True):
"""Get the consensus of an alignment, as a string.
Emit gap characters for majority-gap columns; apply various strategies to
choose the consensus amino acid type for the remaining columns.
Parameters
----------
simple : bool
If True, use simple plurality to determine the consensus amino acid
type, without weighting sequences for similarity. Otherwise, weight
sequences for similarity and use relative entropy to choose the
consensus amino acid type.
weights : dict or None
Sequence weights. If given, used to calculate amino acid frequencies;
otherwise calculated within this function (i.e. this is a way to speed
up the function if sequence weights have already been calculated).
Ignored in 'simple' mode.
trim_ends : bool
If False, stretch the consensus sequence to include the N- and C-tails
of the alignment, even if those flanking columns are mostly gap
characters. This avoids terminal gaps in the consensus (needed for
MAPGAPS).
gap_threshold : float
If the proportion of gap characters in a column is greater than or equal
to this value (after sequence weighting, if applicable), then the
consensus character emitted will be a gap instead of an amino acid type.
"""
# Choose your algorithms!
if simple:
# Use the simple, unweighted algorithm
col_consensus = make_simple_col_consensus(alnutils.aa_frequencies(aln))
def is_majority_gap(col):
return (float(col.count('-')) / len(col) >= gap_threshold)
# ENH (alternatively/additionally): does any aa occur more than once?
# ENH: choose gap-decisionmaking separately from col_consensus
else:
# Use the entropy-based, weighted algorithm
if weights is None:
seq_weights = alnutils.sequence_weights(aln, 'avg1')
else:
seq_weights = weights
aa_frequencies = alnutils.aa_frequencies(aln, weights=seq_weights)
col_consensus = make_entropy_col_consensus(aa_frequencies)
def is_majority_gap(col):
gap_count = 0.0
for wt, char in zip(seq_weights, col):
if char == '-':
gap_count += wt
return (gap_count / sum(seq_weights) >= gap_threshold)
# Traverse the alignment, handling gaps etc.
def col_wise_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
if not trim_ends:
# Track if we're in the N-term or C-term end of the sequence
in_left_end = True
maybe_right_tail = []
# prev_col = None
# prev_char = None
for col in columns:
# Lowercase cols mean explicitly, "don't include in consensus"
if all(c.islower() for c in col if c not in '.-'):
yield '-'
continue
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Gap chars
is_gap = is_majority_gap(col)
if not trim_ends:
# Avoid N-terminal gaps in the consensus sequence
if in_left_end:
if not is_gap:
# Match -- we're no longer in the left end
in_left_end = False
is_gap = False
# When to yield a gap here:
# ----------- --------- ------ ----------
# in_left_end trim_ends is_gap yield gap?
# ----------- --------- ------ ----------
# True True (True) yes
# True False (False) (no -- def. char)
# False True T/F yes, if is_gap
# False False (T/F) NO! use maybe_right_tail
# ----------- --------- ------ ----------
if is_gap and trim_ends:
yield '-'
continue
# Get the consensus character, using the chosen algorithm
cons_char = col_consensus(col)
if trim_ends:
yield cons_char
else:
# Avoid C-terminal gaps in the consensus sequence
if is_gap:
maybe_right_tail.append(cons_char)
else:
# Match -> gaps weren't the right tail; emit all gaps
for char in maybe_right_tail:
yield '-'
maybe_right_tail = []
yield cons_char
# prev_col = col
# prev_char = cons_char
# Finally, if we were keeping a right (C-term) tail, emit it
if not trim_ends:
for char in maybe_right_tail:
yield char
return ''.join(col_wise_consensus(zip(*aln))) | [
"def",
"consensus",
"(",
"aln",
",",
"weights",
"=",
"None",
",",
"gap_threshold",
"=",
"0.5",
",",
"simple",
"=",
"False",
",",
"trim_ends",
"=",
"True",
")",
":",
"# Choose your algorithms!",
"if",
"simple",
":",
"# Use the simple, unweighted algorithm",
"col_consensus",
"=",
"make_simple_col_consensus",
"(",
"alnutils",
".",
"aa_frequencies",
"(",
"aln",
")",
")",
"def",
"is_majority_gap",
"(",
"col",
")",
":",
"return",
"(",
"float",
"(",
"col",
".",
"count",
"(",
"'-'",
")",
")",
"/",
"len",
"(",
"col",
")",
">=",
"gap_threshold",
")",
"# ENH (alternatively/additionally): does any aa occur more than once?",
"# ENH: choose gap-decisionmaking separately from col_consensus",
"else",
":",
"# Use the entropy-based, weighted algorithm",
"if",
"weights",
"is",
"None",
":",
"seq_weights",
"=",
"alnutils",
".",
"sequence_weights",
"(",
"aln",
",",
"'avg1'",
")",
"else",
":",
"seq_weights",
"=",
"weights",
"aa_frequencies",
"=",
"alnutils",
".",
"aa_frequencies",
"(",
"aln",
",",
"weights",
"=",
"seq_weights",
")",
"col_consensus",
"=",
"make_entropy_col_consensus",
"(",
"aa_frequencies",
")",
"def",
"is_majority_gap",
"(",
"col",
")",
":",
"gap_count",
"=",
"0.0",
"for",
"wt",
",",
"char",
"in",
"zip",
"(",
"seq_weights",
",",
"col",
")",
":",
"if",
"char",
"==",
"'-'",
":",
"gap_count",
"+=",
"wt",
"return",
"(",
"gap_count",
"/",
"sum",
"(",
"seq_weights",
")",
">=",
"gap_threshold",
")",
"# Traverse the alignment, handling gaps etc.",
"def",
"col_wise_consensus",
"(",
"columns",
")",
":",
"\"\"\"Calculate the consensus chars for an iterable of columns.\"\"\"",
"if",
"not",
"trim_ends",
":",
"# Track if we're in the N-term or C-term end of the sequence",
"in_left_end",
"=",
"True",
"maybe_right_tail",
"=",
"[",
"]",
"# prev_col = None",
"# prev_char = None",
"for",
"col",
"in",
"columns",
":",
"# Lowercase cols mean explicitly, \"don't include in consensus\"",
"if",
"all",
"(",
"c",
".",
"islower",
"(",
")",
"for",
"c",
"in",
"col",
"if",
"c",
"not",
"in",
"'.-'",
")",
":",
"yield",
"'-'",
"continue",
"if",
"any",
"(",
"c",
".",
"islower",
"(",
")",
"for",
"c",
"in",
"col",
")",
":",
"logging",
".",
"warn",
"(",
"'Mixed lowercase and uppercase letters in a '",
"'column: '",
"+",
"''",
".",
"join",
"(",
"col",
")",
")",
"col",
"=",
"map",
"(",
"str",
".",
"upper",
",",
"col",
")",
"# Gap chars",
"is_gap",
"=",
"is_majority_gap",
"(",
"col",
")",
"if",
"not",
"trim_ends",
":",
"# Avoid N-terminal gaps in the consensus sequence",
"if",
"in_left_end",
":",
"if",
"not",
"is_gap",
":",
"# Match -- we're no longer in the left end",
"in_left_end",
"=",
"False",
"is_gap",
"=",
"False",
"# When to yield a gap here:",
"# ----------- --------- ------ ----------",
"# in_left_end trim_ends is_gap yield gap?",
"# ----------- --------- ------ ----------",
"# True True (True) yes",
"# True False (False) (no -- def. char)",
"# False True T/F yes, if is_gap",
"# False False (T/F) NO! use maybe_right_tail",
"# ----------- --------- ------ ----------",
"if",
"is_gap",
"and",
"trim_ends",
":",
"yield",
"'-'",
"continue",
"# Get the consensus character, using the chosen algorithm",
"cons_char",
"=",
"col_consensus",
"(",
"col",
")",
"if",
"trim_ends",
":",
"yield",
"cons_char",
"else",
":",
"# Avoid C-terminal gaps in the consensus sequence",
"if",
"is_gap",
":",
"maybe_right_tail",
".",
"append",
"(",
"cons_char",
")",
"else",
":",
"# Match -> gaps weren't the right tail; emit all gaps",
"for",
"char",
"in",
"maybe_right_tail",
":",
"yield",
"'-'",
"maybe_right_tail",
"=",
"[",
"]",
"yield",
"cons_char",
"# prev_col = col",
"# prev_char = cons_char",
"# Finally, if we were keeping a right (C-term) tail, emit it",
"if",
"not",
"trim_ends",
":",
"for",
"char",
"in",
"maybe_right_tail",
":",
"yield",
"char",
"return",
"''",
".",
"join",
"(",
"col_wise_consensus",
"(",
"zip",
"(",
"*",
"aln",
")",
")",
")"
] | Get the consensus of an alignment, as a string.
Emit gap characters for majority-gap columns; apply various strategies to
choose the consensus amino acid type for the remaining columns.
Parameters
----------
simple : bool
If True, use simple plurality to determine the consensus amino acid
type, without weighting sequences for similarity. Otherwise, weight
sequences for similarity and use relative entropy to choose the
consensus amino acid type.
weights : dict or None
Sequence weights. If given, used to calculate amino acid frequencies;
otherwise calculated within this function (i.e. this is a way to speed
up the function if sequence weights have already been calculated).
Ignored in 'simple' mode.
trim_ends : bool
If False, stretch the consensus sequence to include the N- and C-tails
of the alignment, even if those flanking columns are mostly gap
characters. This avoids terminal gaps in the consensus (needed for
MAPGAPS).
gap_threshold : float
If the proportion of gap characters in a column is greater than or equal
to this value (after sequence weighting, if applicable), then the
consensus character emitted will be a gap instead of an amino acid type. | [
"Get",
"the",
"consensus",
"of",
"an",
"alignment",
"as",
"a",
"string",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/consensus.py#L10-L130 | train |
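The column-wise traversal and the gap rule can be illustrated without biofrills: a toy, unweighted consensus that emits '-' for majority-gap columns and the plurality residue otherwise (no tie-breaking or entropy weighting, unlike the real function):

from collections import Counter

def toy_consensus(aln, gap_threshold=0.5):
    out = []
    for col in zip(*aln):  # iterate over alignment columns
        if float(col.count('-')) / len(col) >= gap_threshold:
            out.append('-')  # majority-gap column
        else:
            residues = [c for c in col if c != '-']
            out.append(Counter(residues).most_common(1)[0][0])
    return ''.join(out)

print(toy_consensus(['AK-R', 'AK-R', 'AV--']))  # -> 'AK-R'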
etal/biofrills | biofrills/consensus.py | make_simple_col_consensus | def make_simple_col_consensus(bg_freqs):
"""Consensus by simple plurality, unweighted.
Resolves ties by two heuristics:
1. Prefer the aa that follows the preceding consensus aa type most often
in the original sequences.
2. Finally, prefer the less-common aa type.
"""
# Hack: use default kwargs to persist across iterations
def col_consensus(col, prev_col=[], prev_char=[]):
# Count the amino acid types in this column
aa_counts = sequtils.aa_frequencies(col)
assert aa_counts, "Column is all gaps! That's not allowed."
# Take the most common residue(s)
best_char, best_score = max(aa_counts.iteritems(),
key=lambda kv: kv[1])
# Resolve ties
ties = [aa for aa in aa_counts if aa_counts[aa] == best_score]
if len(ties) > 1:
# Breaker #1: most common after the prev. consensus char
# Resolve a tied col by restricting to rows where the preceding
# char is the consensus type for that (preceding) col
if prev_char and prev_col:
mc_next = Counter(
[b for a, b in zip(prev_col, col)
if a == prev_char[0] and b in ties]
).most_common()
ties_next = [x[0] for x in mc_next
if x[1] == mc_next[0][1]]
if ties_next:
ties = ties_next
if len(ties) > 1:
# Breaker #2: lowest overall residue frequency
ties.sort(key=lambda aa: bg_freqs[aa])
best_char = ties[0]
else:
assert best_char == ties[0], \
'WTF %s != %s[0]' % (best_char, ties)
# Save values for tie-breaker #1
prev_col[:] = col
prev_char[:] = best_char
return best_char
return col_consensus | python | def make_simple_col_consensus(bg_freqs):
"""Consensus by simple plurality, unweighted.
Resolves ties by two heuristics:
1. Prefer the aa that follows the preceding consensus aa type most often
in the original sequences.
2. Finally, prefer the less-common aa type.
"""
# Hack: use default kwargs to persist across iterations
def col_consensus(col, prev_col=[], prev_char=[]):
# Count the amino acid types in this column
aa_counts = sequtils.aa_frequencies(col)
assert aa_counts, "Column is all gaps! That's not allowed."
# Take the most common residue(s)
best_char, best_score = max(aa_counts.iteritems(),
key=lambda kv: kv[1])
# Resolve ties
ties = [aa for aa in aa_counts if aa_counts[aa] == best_score]
if len(ties) > 1:
# Breaker #1: most common after the prev. consensus char
# Resolve a tied col by restricting to rows where the preceding
# char is the consensus type for that (preceding) col
if prev_char and prev_col:
mc_next = Counter(
[b for a, b in zip(prev_col, col)
if a == prev_char[0] and b in ties]
).most_common()
ties_next = [x[0] for x in mc_next
if x[1] == mc_next[0][1]]
if ties_next:
ties = ties_next
if len(ties) > 1:
# Breaker #2: lowest overall residue frequency
ties.sort(key=lambda aa: bg_freqs[aa])
best_char = ties[0]
else:
assert best_char == ties[0], \
'WTF %s != %s[0]' % (best_char, ties)
# Save values for tie-breaker #1
prev_col[:] = col
prev_char[:] = best_char
return best_char
return col_consensus | [
"def",
"make_simple_col_consensus",
"(",
"bg_freqs",
")",
":",
"# Hack: use default kwargs to persist across iterations",
"def",
"col_consensus",
"(",
"col",
",",
"prev_col",
"=",
"[",
"]",
",",
"prev_char",
"=",
"[",
"]",
")",
":",
"# Count the amino acid types in this column",
"aa_counts",
"=",
"sequtils",
".",
"aa_frequencies",
"(",
"col",
")",
"assert",
"aa_counts",
",",
"\"Column is all gaps! That's not allowed.\"",
"# Take the most common residue(s)",
"best_char",
",",
"best_score",
"=",
"max",
"(",
"aa_counts",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"lambda",
"kv",
":",
"kv",
"[",
"1",
"]",
")",
"# Resolve ties",
"ties",
"=",
"[",
"aa",
"for",
"aa",
"in",
"aa_counts",
"if",
"aa_counts",
"[",
"aa",
"]",
"==",
"best_score",
"]",
"if",
"len",
"(",
"ties",
")",
">",
"1",
":",
"# Breaker #1: most common after the prev. consensus char",
"# Resolve a tied col by restricting to rows where the preceding",
"# char is the consensus type for that (preceding) col",
"if",
"prev_char",
"and",
"prev_col",
":",
"mc_next",
"=",
"Counter",
"(",
"[",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"prev_col",
",",
"col",
")",
"if",
"a",
"==",
"prev_char",
"[",
"0",
"]",
"and",
"b",
"in",
"ties",
"]",
")",
".",
"most_common",
"(",
")",
"ties_next",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"mc_next",
"if",
"x",
"[",
"1",
"]",
"==",
"mc_next",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
"if",
"ties_next",
":",
"ties",
"=",
"ties_next",
"if",
"len",
"(",
"ties",
")",
">",
"1",
":",
"# Breaker #2: lowest overall residue frequency",
"ties",
".",
"sort",
"(",
"key",
"=",
"lambda",
"aa",
":",
"bg_freqs",
"[",
"aa",
"]",
")",
"best_char",
"=",
"ties",
"[",
"0",
"]",
"else",
":",
"assert",
"best_char",
"==",
"ties",
"[",
"0",
"]",
",",
"'WTF %s != %s[0]'",
"%",
"(",
"best_char",
",",
"ties",
")",
"# Save values for tie-breaker #1",
"prev_col",
"[",
":",
"]",
"=",
"col",
"prev_char",
"[",
":",
"]",
"=",
"best_char",
"return",
"best_char",
"return",
"col_consensus"
] | Consensus by simple plurality, unweighted.
Resolves ties by two heuristics:
1. Prefer the aa that follows the preceding consensus aa type most often
in the original sequences.
2. Finally, prefer the less-common aa type. | [
"Consensus",
"by",
"simple",
"plurality",
"unweighted",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/consensus.py#L166-L209 | train |
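The tie-breaker above persists prev_col and prev_char between calls through mutable default arguments, which are created once at function definition time. The same trick in isolation (a deliberate use of behavior that is usually a pitfall):

def counter(step, _state=[0]):
    # _state is evaluated once, so it survives across calls
    _state[0] += step
    return _state[0]

print(counter(2))  # 2
print(counter(3))  # 5 -- the default list remembered the previous call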
etal/biofrills | biofrills/consensus.py | supported | def supported(aln):
"""Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
"""
def col_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
for col in columns:
if (# Majority gap chars
(col.count('-') >= len(col)/2) or
# Lowercase cols mean "don't include in consensus"
all(c.islower() for c in col if c not in '.-')
):
yield '-'
continue
# Validation - copied from consensus() above
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Calculate the consensus character
most_common = Counter(
[c for c in col if c not in '-']
).most_common()
if not most_common:
# XXX ever reached?
logging.warn("Column is all gaps! How did that happen?")
if most_common[0][1] == 1:
# No char has frequency > 1; no consensus char
yield '-'
elif (len(most_common) > 1 and
most_common[0][1] == most_common[1][1]):
# Tie for most-common residue type
ties = [x[0] for x in most_common
if x[1] == most_common[0][1]]
yield ''.join(ties)
else:
yield most_common[0][0]
return list(col_consensus(zip(*aln))) | python | def supported(aln):
"""Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
"""
def col_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
for col in columns:
if (# Majority gap chars
(col.count('-') >= len(col)/2) or
# Lowercase cols mean "don't include in consensus"
all(c.islower() for c in col if c not in '.-')
):
yield '-'
continue
# Validation - copied from consensus() above
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Calculate the consensus character
most_common = Counter(
[c for c in col if c not in '-']
).most_common()
if not most_common:
# XXX ever reached?
logging.warn("Column is all gaps! How did that happen?")
if most_common[0][1] == 1:
# No char has frequency > 1; no consensus char
yield '-'
elif (len(most_common) > 1 and
most_common[0][1] == most_common[1][1]):
# Tie for most-common residue type
ties = [x[0] for x in most_common
if x[1] == most_common[0][1]]
yield ''.join(ties)
else:
yield most_common[0][0]
return list(col_consensus(zip(*aln))) | [
"def",
"supported",
"(",
"aln",
")",
":",
"def",
"col_consensus",
"(",
"columns",
")",
":",
"\"\"\"Calculate the consensus chars for an iterable of columns.\"\"\"",
"for",
"col",
"in",
"columns",
":",
"if",
"(",
"# Majority gap chars",
"(",
"col",
".",
"count",
"(",
"'-'",
")",
">=",
"len",
"(",
"col",
")",
"/",
"2",
")",
"or",
"# Lowercase cols mean \"don't include in consensus\"",
"all",
"(",
"c",
".",
"islower",
"(",
")",
"for",
"c",
"in",
"col",
"if",
"c",
"not",
"in",
"'.-'",
")",
")",
":",
"yield",
"'-'",
"continue",
"# Validation - copied from consensus() above",
"if",
"any",
"(",
"c",
".",
"islower",
"(",
")",
"for",
"c",
"in",
"col",
")",
":",
"logging",
".",
"warn",
"(",
"'Mixed lowercase and uppercase letters in a '",
"'column: '",
"+",
"''",
".",
"join",
"(",
"col",
")",
")",
"col",
"=",
"map",
"(",
"str",
".",
"upper",
",",
"col",
")",
"# Calculate the consensus character",
"most_common",
"=",
"Counter",
"(",
"[",
"c",
"for",
"c",
"in",
"col",
"if",
"c",
"not",
"in",
"'-'",
"]",
")",
".",
"most_common",
"(",
")",
"if",
"not",
"most_common",
":",
"# XXX ever reached?",
"logging",
".",
"warn",
"(",
"\"Column is all gaps! How did that happen?\"",
")",
"if",
"most_common",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"1",
":",
"# No char has frequency > 1; no consensus char",
"yield",
"'-'",
"elif",
"(",
"len",
"(",
"most_common",
")",
">",
"1",
"and",
"most_common",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"most_common",
"[",
"1",
"]",
"[",
"1",
"]",
")",
":",
"# Tie for most-common residue type",
"ties",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"most_common",
"if",
"x",
"[",
"1",
"]",
"==",
"most_common",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
"yield",
"''",
".",
"join",
"(",
"ties",
")",
"else",
":",
"yield",
"most_common",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"list",
"(",
"col_consensus",
"(",
"zip",
"(",
"*",
"aln",
")",
")",
")"
] | Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR'). | [
"Get",
"only",
"the",
"supported",
"consensus",
"residues",
"in",
"each",
"column",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/consensus.py#L215-L261 | train |
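The per-column rule of supported() in miniature: keep only residue types seen more than once and report all tied leaders (a hypothetical helper mirroring the Counter logic, not part of biofrills):

from collections import Counter

def supported_column(col):
    counts = Counter(c for c in col if c != '-').most_common()
    if not counts or counts[0][1] == 1:
        return '-'  # no residue type occurs more than once
    top = counts[0][1]
    return ''.join(aa for aa, n in counts if n == top)

print(supported_column('KKRR-'))  # -> 'KR' (tie between K and R)
print(supported_column('KRAG-'))  # -> '-' (every residue occurs only once)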
idlesign/django-sitecats | sitecats/templatetags/sitecats.py | detect_clause | def detect_clause(parser, clause_name, tokens, as_filter_expr=True):
"""Helper function detects a certain clause in tag tokens list.
Returns its value.
"""
if clause_name in tokens:
t_index = tokens.index(clause_name)
clause_value = tokens[t_index + 1]
if as_filter_expr:
clause_value = parser.compile_filter(clause_value)
del tokens[t_index:t_index + 2]
else:
clause_value = None
return clause_value | python | def detect_clause(parser, clause_name, tokens, as_filter_expr=True):
"""Helper function detects a certain clause in tag tokens list.
Returns its value.
"""
if clause_name in tokens:
t_index = tokens.index(clause_name)
clause_value = tokens[t_index + 1]
if as_filter_expr:
clause_value = parser.compile_filter(clause_value)
del tokens[t_index:t_index + 2]
else:
clause_value = None
return clause_value | [
"def",
"detect_clause",
"(",
"parser",
",",
"clause_name",
",",
"tokens",
",",
"as_filter_expr",
"=",
"True",
")",
":",
"if",
"clause_name",
"in",
"tokens",
":",
"t_index",
"=",
"tokens",
".",
"index",
"(",
"clause_name",
")",
"clause_value",
"=",
"tokens",
"[",
"t_index",
"+",
"1",
"]",
"if",
"as_filter_expr",
":",
"clause_value",
"=",
"parser",
".",
"compile_filter",
"(",
"clause_value",
")",
"del",
"tokens",
"[",
"t_index",
":",
"t_index",
"+",
"2",
"]",
"else",
":",
"clause_value",
"=",
"None",
"return",
"clause_value"
] | Helper function detects a certain clause in tag tokens list.
Returns its value. | [
"Helper",
"function",
"detects",
"a",
"certain",
"clause",
"in",
"tag",
"tokens",
"list",
".",
"Returns",
"its",
"value",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/templatetags/sitecats.py#L102-L115 | train |
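A usage sketch with as_filter_expr=False, so no real template parser is required; the token list is made up for illustration:

tokens = ['sitecats_categories', 'from', 'my_categories', 'template', '"mytpl.html"']
template = detect_clause(None, 'template', tokens, as_filter_expr=False)
print(template)  # '"mytpl.html"'
print(tokens)    # ['sitecats_categories', 'from', 'my_categories'] -- clause removed in place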
toros-astro/corral | corral/cli/commands.py | Notebook.install_kernel_spec | def install_kernel_spec(self, app, dir_name, display_name,
settings_module, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads corral env
Thanks: django extensions
"""
ksm = app.kernel_spec_manager
try_spec_names = ['python3' if six.PY3 else 'python2', 'python']
if isinstance(try_spec_names, six.string_types):
try_spec_names = [try_spec_names]
ks = None
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except Exception:
continue
if not ks:
self.parser.error("No notebook (Python) kernel specs found")
ks.display_name = display_name
ks.env["CORRAL_SETTINGS_MODULE"] = settings_module
ks.argv.extend(ipython_arguments)
in_corral_dir, in_corral = os.path.split(os.path.realpath(sys.argv[0]))
pythonpath = ks.env.get(
'PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if in_corral_dir not in pythonpath:
pythonpath.append(in_corral_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))
kernel_dir = os.path.join(ksm.user_kernel_dir, conf.PACKAGE)
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
shutil.copy(res.fullpath("logo-64x64.png"), kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(ks.to_json()) | python | def install_kernel_spec(self, app, dir_name, display_name,
settings_module, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads corral env
Thanks: django extensions
"""
ksm = app.kernel_spec_manager
try_spec_names = ['python3' if six.PY3 else 'python2', 'python']
if isinstance(try_spec_names, six.string_types):
try_spec_names = [try_spec_names]
ks = None
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except Exception:
continue
if not ks:
self.parser.error("No notebook (Python) kernel specs found")
ks.display_name = display_name
ks.env["CORRAL_SETTINGS_MODULE"] = settings_module
ks.argv.extend(ipython_arguments)
in_corral_dir, in_corral = os.path.split(os.path.realpath(sys.argv[0]))
pythonpath = ks.env.get(
'PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if in_corral_dir not in pythonpath:
pythonpath.append(in_corral_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))
kernel_dir = os.path.join(ksm.user_kernel_dir, conf.PACKAGE)
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
shutil.copy(res.fullpath("logo-64x64.png"), kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(ks.to_json()) | [
"def",
"install_kernel_spec",
"(",
"self",
",",
"app",
",",
"dir_name",
",",
"display_name",
",",
"settings_module",
",",
"ipython_arguments",
")",
":",
"ksm",
"=",
"app",
".",
"kernel_spec_manager",
"try_spec_names",
"=",
"[",
"'python3'",
"if",
"six",
".",
"PY3",
"else",
"'python2'",
",",
"'python'",
"]",
"if",
"isinstance",
"(",
"try_spec_names",
",",
"six",
".",
"string_types",
")",
":",
"try_spec_names",
"=",
"[",
"try_spec_names",
"]",
"ks",
"=",
"None",
"for",
"spec_name",
"in",
"try_spec_names",
":",
"try",
":",
"ks",
"=",
"ksm",
".",
"get_kernel_spec",
"(",
"spec_name",
")",
"break",
"except",
"Exception",
":",
"continue",
"if",
"not",
"ks",
":",
"self",
".",
"parser",
".",
"error",
"(",
"\"No notebook (Python) kernel specs found\"",
")",
"ks",
".",
"display_name",
"=",
"display_name",
"ks",
".",
"env",
"[",
"\"CORRAL_SETTINGS_MODULE\"",
"]",
"=",
"settings_module",
"ks",
".",
"argv",
".",
"extend",
"(",
"ipython_arguments",
")",
"in_corral_dir",
",",
"in_corral",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"pythonpath",
"=",
"ks",
".",
"env",
".",
"get",
"(",
"'PYTHONPATH'",
",",
"os",
".",
"environ",
".",
"get",
"(",
"'PYTHONPATH'",
",",
"''",
")",
")",
"pythonpath",
"=",
"pythonpath",
".",
"split",
"(",
"':'",
")",
"if",
"in_corral_dir",
"not",
"in",
"pythonpath",
":",
"pythonpath",
".",
"append",
"(",
"in_corral_dir",
")",
"ks",
".",
"env",
"[",
"'PYTHONPATH'",
"]",
"=",
"':'",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"pythonpath",
")",
")",
"kernel_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ksm",
".",
"user_kernel_dir",
",",
"conf",
".",
"PACKAGE",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"kernel_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"kernel_dir",
")",
"shutil",
".",
"copy",
"(",
"res",
".",
"fullpath",
"(",
"\"logo-64x64.png\"",
")",
",",
"kernel_dir",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"kernel_dir",
",",
"'kernel.json'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"ks",
".",
"to_json",
"(",
")",
")"
] | install an IPython >= 3.0 kernelspec that loads corral env
Thanks: django extensions | [
"install",
"an",
"IPython",
">",
"=",
"3",
".",
"0",
"kernelspec",
"that",
"loads",
"corral",
"env"
] | 75474b38ff366330d33644461a902d07374a5bbc | https://github.com/toros-astro/corral/blob/75474b38ff366330d33644461a902d07374a5bbc/corral/cli/commands.py#L250-L289 | train |
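For context, the kernel.json written at the end of install_kernel_spec follows Jupyter's kernelspec layout; a hedged sketch of such a payload (every value below is an illustrative assumption, not output captured from a real corral project):

import json

kernel_spec = {
    # argv/display_name/language/env are standard kernelspec keys;
    # the concrete values here are placeholders
    "argv": ["python", "-m", "ipykernel", "-f", "{connection_file}"],
    "display_name": "my_pipeline",
    "language": "python",
    "env": {
        "CORRAL_SETTINGS_MODULE": "my_pipeline.settings",
        "PYTHONPATH": "/path/to/in_corral_dir",
    },
}
print(json.dumps(kernel_spec, indent=2))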
idlesign/django-sitecats | sitecats/utils.py | Cache._cache_init | def _cache_init(self):
"""Initializes local cache from Django cache if required."""
cache_ = cache.get(self.CACHE_ENTRY_NAME)
if cache_ is None:
categories = get_category_model().objects.order_by('sort_order')
ids = {category.id: category for category in categories}
aliases = {category.alias: category for category in categories if category.alias}
parent_to_children = OrderedDict() # Preserve aliases order.
for category in categories:
parent_category = ids.get(category.parent_id, False)
parent_alias = None
if parent_category:
parent_alias = parent_category.alias
if parent_alias not in parent_to_children:
parent_to_children[parent_alias] = []
parent_to_children[parent_alias].append(category.id)
cache_ = {
self.CACHE_NAME_IDS: ids,
self.CACHE_NAME_PARENTS: parent_to_children,
self.CACHE_NAME_ALIASES: aliases
}
cache.set(self.CACHE_ENTRY_NAME, cache_, self.CACHE_TIMEOUT)
self._cache = cache_ | python | def _cache_init(self):
"""Initializes local cache from Django cache if required."""
cache_ = cache.get(self.CACHE_ENTRY_NAME)
if cache_ is None:
categories = get_category_model().objects.order_by('sort_order')
ids = {category.id: category for category in categories}
aliases = {category.alias: category for category in categories if category.alias}
parent_to_children = OrderedDict() # Preserve aliases order.
for category in categories:
parent_category = ids.get(category.parent_id, False)
parent_alias = None
if parent_category:
parent_alias = parent_category.alias
if parent_alias not in parent_to_children:
parent_to_children[parent_alias] = []
parent_to_children[parent_alias].append(category.id)
cache_ = {
self.CACHE_NAME_IDS: ids,
self.CACHE_NAME_PARENTS: parent_to_children,
self.CACHE_NAME_ALIASES: aliases
}
cache.set(self.CACHE_ENTRY_NAME, cache_, self.CACHE_TIMEOUT)
self._cache = cache_ | [
"def",
"_cache_init",
"(",
"self",
")",
":",
"cache_",
"=",
"cache",
".",
"get",
"(",
"self",
".",
"CACHE_ENTRY_NAME",
")",
"if",
"cache_",
"is",
"None",
":",
"categories",
"=",
"get_category_model",
"(",
")",
".",
"objects",
".",
"order_by",
"(",
"'sort_order'",
")",
"ids",
"=",
"{",
"category",
".",
"id",
":",
"category",
"for",
"category",
"in",
"categories",
"}",
"aliases",
"=",
"{",
"category",
".",
"alias",
":",
"category",
"for",
"category",
"in",
"categories",
"if",
"category",
".",
"alias",
"}",
"parent_to_children",
"=",
"OrderedDict",
"(",
")",
"# Preserve aliases order.",
"for",
"category",
"in",
"categories",
":",
"parent_category",
"=",
"ids",
".",
"get",
"(",
"category",
".",
"parent_id",
",",
"False",
")",
"parent_alias",
"=",
"None",
"if",
"parent_category",
":",
"parent_alias",
"=",
"parent_category",
".",
"alias",
"if",
"parent_alias",
"not",
"in",
"parent_to_children",
":",
"parent_to_children",
"[",
"parent_alias",
"]",
"=",
"[",
"]",
"parent_to_children",
"[",
"parent_alias",
"]",
".",
"append",
"(",
"category",
".",
"id",
")",
"cache_",
"=",
"{",
"self",
".",
"CACHE_NAME_IDS",
":",
"ids",
",",
"self",
".",
"CACHE_NAME_PARENTS",
":",
"parent_to_children",
",",
"self",
".",
"CACHE_NAME_ALIASES",
":",
"aliases",
"}",
"cache",
".",
"set",
"(",
"self",
".",
"CACHE_ENTRY_NAME",
",",
"cache_",
",",
"self",
".",
"CACHE_TIMEOUT",
")",
"self",
".",
"_cache",
"=",
"cache_"
] | Initializes local cache from Django cache if required. | [
"Initializes",
"local",
"cache",
"from",
"Django",
"cache",
"if",
"required",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L57-L85 | train |
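A runnable toy illustration of the three-part cache layout the method builds, with category model instances replaced by namedtuples and invented data:

from collections import OrderedDict, namedtuple

Category = namedtuple('Category', 'id alias parent_id')

categories = [
    Category(1, 'books', None),
    Category(2, 'fiction', 1),
    Category(3, 'sci-fi', 1),
]
ids = {c.id: c for c in categories}
aliases = {c.alias: c for c in categories if c.alias}

parent_to_children = OrderedDict()
for c in categories:
    parent = ids.get(c.parent_id, False)
    parent_alias = parent.alias if parent else None
    # setdefault is equivalent to the explicit membership check above
    parent_to_children.setdefault(parent_alias, []).append(c.id)

assert parent_to_children == OrderedDict([(None, [1]), ('books', [2, 3])])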
idlesign/django-sitecats | sitecats/utils.py | Cache._cache_get_entry | def _cache_get_entry(self, entry_name, key=ENTIRE_ENTRY_KEY, default=False):
"""Returns cache entry parameter value by its name.
:param str entry_name:
:param str key:
:param type default:
:return:
"""
if key is self.ENTIRE_ENTRY_KEY:
return self._cache[entry_name]
return self._cache[entry_name].get(key, default) | python | def _cache_get_entry(self, entry_name, key=ENTIRE_ENTRY_KEY, default=False):
"""Returns cache entry parameter value by its name.
:param str entry_name:
:param str key:
:param type default:
:return:
"""
if key is self.ENTIRE_ENTRY_KEY:
return self._cache[entry_name]
return self._cache[entry_name].get(key, default) | [
"def",
"_cache_get_entry",
"(",
"self",
",",
"entry_name",
",",
"key",
"=",
"ENTIRE_ENTRY_KEY",
",",
"default",
"=",
"False",
")",
":",
"if",
"key",
"is",
"self",
".",
"ENTIRE_ENTRY_KEY",
":",
"return",
"self",
".",
"_cache",
"[",
"entry_name",
"]",
"return",
"self",
".",
"_cache",
"[",
"entry_name",
"]",
".",
"get",
"(",
"key",
",",
"default",
")"
] | Returns cache entry parameter value by its name.
:param str entry_name:
:param str key:
:param type default:
:return: | [
"Returns",
"cache",
"entry",
"parameter",
"value",
"by",
"its",
"name",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L94-L104 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.sort_aliases | def sort_aliases(self, aliases):
"""Sorts the given aliases list, returns a sorted list.
:param list aliases:
:return: sorted aliases list
"""
self._cache_init()
if not aliases:
return aliases
parent_aliases = self._cache_get_entry(self.CACHE_NAME_PARENTS).keys()
return [parent_alias for parent_alias in parent_aliases if parent_alias in aliases] | python | def sort_aliases(self, aliases):
"""Sorts the given aliases list, returns a sorted list.
:param list aliases:
:return: sorted aliases list
"""
self._cache_init()
if not aliases:
return aliases
parent_aliases = self._cache_get_entry(self.CACHE_NAME_PARENTS).keys()
return [parent_alias for parent_alias in parent_aliases if parent_alias in aliases] | [
"def",
"sort_aliases",
"(",
"self",
",",
"aliases",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"if",
"not",
"aliases",
":",
"return",
"aliases",
"parent_aliases",
"=",
"self",
".",
"_cache_get_entry",
"(",
"self",
".",
"CACHE_NAME_PARENTS",
")",
".",
"keys",
"(",
")",
"return",
"[",
"parent_alias",
"for",
"parent_alias",
"in",
"parent_aliases",
"if",
"parent_alias",
"in",
"aliases",
"]"
] | Sorts the given aliases list, returns a sorted list.
:param list aliases:
:return: sorted aliases list | [
"Sorts",
"the",
"given",
"aliases",
"list",
"returns",
"a",
"sorted",
"list",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L106-L116 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.get_parents_for | def get_parents_for(self, child_ids):
"""Returns parent aliases for a list of child IDs.
:param list child_ids:
:rtype: set
:return: a set of parent aliases
"""
self._cache_init()
parent_candidates = []
for parent, children in self._cache_get_entry(self.CACHE_NAME_PARENTS).items():
if set(children).intersection(child_ids):
parent_candidates.append(parent)
return set(parent_candidates) | python | def get_parents_for(self, child_ids):
"""Returns parent aliases for a list of child IDs.
:param list child_ids:
:rtype: set
:return: a set of parent aliases
"""
self._cache_init()
parent_candidates = []
for parent, children in self._cache_get_entry(self.CACHE_NAME_PARENTS).items():
if set(children).intersection(child_ids):
parent_candidates.append(parent)
return set(parent_candidates) | [
"def",
"get_parents_for",
"(",
"self",
",",
"child_ids",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"parent_candidates",
"=",
"[",
"]",
"for",
"parent",
",",
"children",
"in",
"self",
".",
"_cache_get_entry",
"(",
"self",
".",
"CACHE_NAME_PARENTS",
")",
".",
"items",
"(",
")",
":",
"if",
"set",
"(",
"children",
")",
".",
"intersection",
"(",
"child_ids",
")",
":",
"parent_candidates",
".",
"append",
"(",
"parent",
")",
"return",
"set",
"(",
"parent_candidates",
")"
] | Returns parent aliases for a list of child IDs.
:param list child_ids:
:rtype: set
:return: a set of parent aliases | [
"Returns",
"parent",
"aliases",
"for",
"a",
"list",
"of",
"child",
"IDs",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L118-L130 | train |
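The intersection test above can be shown with plain sets; the parent/child values are invented:

parent_to_children = {None: [1], 'books': [2, 3], 'music': [4]}

def parents_for(child_ids):
    # mirrors set(children).intersection(child_ids) from the method above
    return {parent for parent, children in parent_to_children.items()
            if set(children) & set(child_ids)}

assert parents_for([3, 4]) == {'books', 'music'}
assert parents_for([99]) == set()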
idlesign/django-sitecats | sitecats/utils.py | Cache.get_children_for | def get_children_for(self, parent_alias=None, only_with_aliases=False):
"""Returns a list with with categories under the given parent.
:param str|None parent_alias: Parent category alias or None for categories under root
:param bool only_with_aliases: Flag to return only children with aliases
:return: a list of category objects
"""
self._cache_init()
child_ids = self.get_child_ids(parent_alias)
if only_with_aliases:
children = []
for cid in child_ids:
category = self.get_category_by_id(cid)
if category.alias:
children.append(category)
return children
return [self.get_category_by_id(cid) for cid in child_ids] | python | def get_children_for(self, parent_alias=None, only_with_aliases=False):
"""Returns a list with with categories under the given parent.
:param str|None parent_alias: Parent category alias or None for categories under root
:param bool only_with_aliases: Flag to return only children with aliases
:return: a list of category objects
"""
self._cache_init()
child_ids = self.get_child_ids(parent_alias)
if only_with_aliases:
children = []
for cid in child_ids:
category = self.get_category_by_id(cid)
if category.alias:
children.append(category)
return children
return [self.get_category_by_id(cid) for cid in child_ids] | [
"def",
"get_children_for",
"(",
"self",
",",
"parent_alias",
"=",
"None",
",",
"only_with_aliases",
"=",
"False",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"child_ids",
"=",
"self",
".",
"get_child_ids",
"(",
"parent_alias",
")",
"if",
"only_with_aliases",
":",
"children",
"=",
"[",
"]",
"for",
"cid",
"in",
"child_ids",
":",
"category",
"=",
"self",
".",
"get_category_by_id",
"(",
"cid",
")",
"if",
"category",
".",
"alias",
":",
"children",
".",
"append",
"(",
"category",
")",
"return",
"children",
"return",
"[",
"self",
".",
"get_category_by_id",
"(",
"cid",
")",
"for",
"cid",
"in",
"child_ids",
"]"
] | Returns a list with categories under the given parent.
:param str|None parent_alias: Parent category alias or None for categories under root
:param bool only_with_aliases: Flag to return only children with aliases
:return: a list of category objects | [
"Returns",
"a",
"list",
"with",
"with",
"categories",
"under",
"the",
"given",
"parent",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L132-L148 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.get_child_ids | def get_child_ids(self, parent_alias):
"""Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, []) | python | def get_child_ids(self, parent_alias):
"""Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, []) | [
"def",
"get_child_ids",
"(",
"self",
",",
"parent_alias",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"return",
"self",
".",
"_cache_get_entry",
"(",
"self",
".",
"CACHE_NAME_PARENTS",
",",
"parent_alias",
",",
"[",
"]",
")"
] | Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs | [
"Returns",
"child",
"IDs",
"of",
"the",
"given",
"parent",
"category"
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L150-L158 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.get_category_by_alias | def get_category_by_alias(self, alias):
"""Returns Category object by its alias.
:param str alias:
:rtype: Category|None
:return: category object
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_ALIASES, alias, None) | python | def get_category_by_alias(self, alias):
"""Returns Category object by its alias.
:param str alias:
:rtype: Category|None
:return: category object
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_ALIASES, alias, None) | [
"def",
"get_category_by_alias",
"(",
"self",
",",
"alias",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"return",
"self",
".",
"_cache_get_entry",
"(",
"self",
".",
"CACHE_NAME_ALIASES",
",",
"alias",
",",
"None",
")"
] | Returns Category object by its alias.
:param str alias:
:rtype: Category|None
:return: category object | [
"Returns",
"Category",
"object",
"by",
"its",
"alias",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L160-L168 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.get_category_by_id | def get_category_by_id(self, cid):
"""Returns Category object by its id.
:param str cid:
:rtype: Category
:return: category object
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_IDS, cid) | python | def get_category_by_id(self, cid):
"""Returns Category object by its id.
:param str cid:
:rtype: Category
:return: category object
"""
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_IDS, cid) | [
"def",
"get_category_by_id",
"(",
"self",
",",
"cid",
")",
":",
"self",
".",
"_cache_init",
"(",
")",
"return",
"self",
".",
"_cache_get_entry",
"(",
"self",
".",
"CACHE_NAME_IDS",
",",
"cid",
")"
] | Returns Category object by its id.
:param str cid:
:rtype: Category
:return: category object | [
"Returns",
"Category",
"object",
"by",
"its",
"id",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L170-L178 | train |
idlesign/django-sitecats | sitecats/utils.py | Cache.get_ties_stats | def get_ties_stats(self, categories, target_model=None):
"""Returns a dict with categories popularity stats.
:param list categories:
:param Model|None target_model:
:return:
"""
filter_kwargs = {
'category_id__in': categories
}
if target_model is not None:
is_cls = hasattr(target_model, '__name__')
if is_cls:
concrete = False
else:
concrete = True
filter_kwargs['object_id'] = target_model.id
filter_kwargs['content_type'] = ContentType.objects.get_for_model(
target_model, for_concrete_model=concrete
)
return {
item['category_id']: item['ties_num'] for item in
get_tie_model().objects.filter(**filter_kwargs).values('category_id').annotate(ties_num=Count('category'))
} | python | def get_ties_stats(self, categories, target_model=None):
"""Returns a dict with categories popularity stats.
:param list categories:
:param Model|None target_model:
:return:
"""
filter_kwargs = {
'category_id__in': categories
}
if target_model is not None:
is_cls = hasattr(target_model, '__name__')
if is_cls:
concrete = False
else:
concrete = True
filter_kwargs['object_id'] = target_model.id
filter_kwargs['content_type'] = ContentType.objects.get_for_model(
target_model, for_concrete_model=concrete
)
return {
item['category_id']: item['ties_num'] for item in
get_tie_model().objects.filter(**filter_kwargs).values('category_id').annotate(ties_num=Count('category'))
} | [
"def",
"get_ties_stats",
"(",
"self",
",",
"categories",
",",
"target_model",
"=",
"None",
")",
":",
"filter_kwargs",
"=",
"{",
"'category_id__in'",
":",
"categories",
"}",
"if",
"target_model",
"is",
"not",
"None",
":",
"is_cls",
"=",
"hasattr",
"(",
"target_model",
",",
"'__name__'",
")",
"if",
"is_cls",
":",
"concrete",
"=",
"False",
"else",
":",
"concrete",
"=",
"True",
"filter_kwargs",
"[",
"'object_id'",
"]",
"=",
"target_model",
".",
"id",
"filter_kwargs",
"[",
"'content_type'",
"]",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"target_model",
",",
"for_concrete_model",
"=",
"concrete",
")",
"return",
"{",
"item",
"[",
"'category_id'",
"]",
":",
"item",
"[",
"'ties_num'",
"]",
"for",
"item",
"in",
"get_tie_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"filter_kwargs",
")",
".",
"values",
"(",
"'category_id'",
")",
".",
"annotate",
"(",
"ties_num",
"=",
"Count",
"(",
"'category'",
")",
")",
"}"
] | Returns a dict with categories popularity stats.
:param list categories:
:param Model|None target_model:
:return: | [
"Returns",
"a",
"dict",
"with",
"categories",
"popularity",
"stats",
"."
] | 9b45e91fc0dcb63a0011780437fe28145e3ecce9 | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L197-L221 | train |
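The values('category_id').annotate(ties_num=Count('category')) call is a standard Django GROUP BY/COUNT; a plain-Python sketch of the same aggregation over hypothetical tie rows:

from collections import Counter

# invented (category_id, object) pairs standing in for Tie rows
ties = [(1, 'obj-a'), (1, 'obj-b'), (2, 'obj-a'), (1, 'obj-c')]
wanted = [1, 2]

ties_num = Counter(cat for cat, _ in ties if cat in wanted)
# same {category_id: ties_num} shape as the method returns
assert dict(ties_num) == {1: 3, 2: 1}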
PeerAssets/pypeerassets | pypeerassets/pautils.py | load_p2th_privkey_into_local_node | def load_p2th_privkey_into_local_node(provider: RpcNode, prod: bool=True) -> None:
'''Load PeerAssets P2TH privkey into the local node.'''
assert isinstance(provider, RpcNode), {"error": "Import only works with local node."}
error = {"error": "Loading P2TH privkey failed."}
pa_params = param_query(provider.network)
if prod:
provider.importprivkey(pa_params.P2TH_wif, "PAPROD")
# now verify if ismine == True
if not provider.validateaddress(pa_params.P2TH_addr)['ismine']:
raise P2THImportFailed(error)
else:
provider.importprivkey(pa_params.test_P2TH_wif, "PATEST")
if not provider.validateaddress(pa_params.test_P2TH_addr)['ismine']:
raise P2THImportFailed(error) | python | def load_p2th_privkey_into_local_node(provider: RpcNode, prod: bool=True) -> None:
'''Load PeerAssets P2TH privkey into the local node.'''
assert isinstance(provider, RpcNode), {"error": "Import only works with local node."}
error = {"error": "Loading P2TH privkey failed."}
pa_params = param_query(provider.network)
if prod:
provider.importprivkey(pa_params.P2TH_wif, "PAPROD")
# now verify if ismine == True
if not provider.validateaddress(pa_params.P2TH_addr)['ismine']:
raise P2THImportFailed(error)
else:
provider.importprivkey(pa_params.test_P2TH_wif, "PATEST")
if not provider.validateaddress(pa_params.test_P2TH_addr)['ismine']:
raise P2THImportFailed(error) | [
"def",
"load_p2th_privkey_into_local_node",
"(",
"provider",
":",
"RpcNode",
",",
"prod",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"assert",
"isinstance",
"(",
"provider",
",",
"RpcNode",
")",
",",
"{",
"\"error\"",
":",
"\"Import only works with local node.\"",
"}",
"error",
"=",
"{",
"\"error\"",
":",
"\"Loading P2TH privkey failed.\"",
"}",
"pa_params",
"=",
"param_query",
"(",
"provider",
".",
"network",
")",
"if",
"prod",
":",
"provider",
".",
"importprivkey",
"(",
"pa_params",
".",
"P2TH_wif",
",",
"\"PAPROD\"",
")",
"# now verify if ismine == True",
"if",
"not",
"provider",
".",
"validateaddress",
"(",
"pa_params",
".",
"P2TH_addr",
")",
"[",
"'ismine'",
"]",
":",
"raise",
"P2THImportFailed",
"(",
"error",
")",
"else",
":",
"provider",
".",
"importprivkey",
"(",
"pa_params",
".",
"test_P2TH_wif",
",",
"\"PATEST\"",
")",
"if",
"not",
"provider",
".",
"validateaddress",
"(",
"pa_params",
".",
"test_P2TH_addr",
")",
"[",
"'ismine'",
"]",
":",
"raise",
"P2THImportFailed",
"(",
"error",
")"
] | Load PeerAssets P2TH privkey into the local node. | [
"Load",
"PeerAssets",
"P2TH",
"privkey",
"into",
"the",
"local",
"node",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L30-L45 | train |
PeerAssets/pypeerassets | pypeerassets/pautils.py | find_deck_spawns | def find_deck_spawns(provider: Provider, prod: bool=True) -> Iterable[str]:
'''find deck spawn transactions via Provider,
it requires that the Deck spawn P2TH address was imported into the local node or
that the remote API knows about the P2TH address.'''
pa_params = param_query(provider.network)
if isinstance(provider, RpcNode):
if prod:
decks = (i["txid"] for i in provider.listtransactions("PAPROD"))
else:
decks = (i["txid"] for i in provider.listtransactions("PATEST"))
if isinstance(provider, Cryptoid) or isinstance(provider, Explorer):
if prod:
decks = (i for i in provider.listtransactions(pa_params.P2TH_addr))
else:
decks = (i for i in provider.listtransactions(pa_params.test_P2TH_addr))
return decks | python | def find_deck_spawns(provider: Provider, prod: bool=True) -> Iterable[str]:
'''find deck spawn transactions via Provider,
it requires that the Deck spawn P2TH address was imported into the local node or
that the remote API knows about the P2TH address.'''
pa_params = param_query(provider.network)
if isinstance(provider, RpcNode):
if prod:
decks = (i["txid"] for i in provider.listtransactions("PAPROD"))
else:
decks = (i["txid"] for i in provider.listtransactions("PATEST"))
if isinstance(provider, Cryptoid) or isinstance(provider, Explorer):
if prod:
decks = (i for i in provider.listtransactions(pa_params.P2TH_addr))
else:
decks = (i for i in provider.listtransactions(pa_params.test_P2TH_addr))
return decks | [
"def",
"find_deck_spawns",
"(",
"provider",
":",
"Provider",
",",
"prod",
":",
"bool",
"=",
"True",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"pa_params",
"=",
"param_query",
"(",
"provider",
".",
"network",
")",
"if",
"isinstance",
"(",
"provider",
",",
"RpcNode",
")",
":",
"if",
"prod",
":",
"decks",
"=",
"(",
"i",
"[",
"\"txid\"",
"]",
"for",
"i",
"in",
"provider",
".",
"listtransactions",
"(",
"\"PAPROD\"",
")",
")",
"else",
":",
"decks",
"=",
"(",
"i",
"[",
"\"txid\"",
"]",
"for",
"i",
"in",
"provider",
".",
"listtransactions",
"(",
"\"PATEST\"",
")",
")",
"if",
"isinstance",
"(",
"provider",
",",
"Cryptoid",
")",
"or",
"isinstance",
"(",
"provider",
",",
"Explorer",
")",
":",
"if",
"prod",
":",
"decks",
"=",
"(",
"i",
"for",
"i",
"in",
"provider",
".",
"listtransactions",
"(",
"pa_params",
".",
"P2TH_addr",
")",
")",
"else",
":",
"decks",
"=",
"(",
"i",
"for",
"i",
"in",
"provider",
".",
"listtransactions",
"(",
"pa_params",
".",
"test_P2TH_addr",
")",
")",
"return",
"decks"
] | find deck spawn transactions via Provider,
it requires that the Deck spawn P2TH address was imported into the local node or
that the remote API knows about the P2TH address. | [
"find",
"deck",
"spawn",
"transactions",
"via",
"Provider",
"it",
"requires",
"that",
"Deck",
"spawn",
"P2TH",
"were",
"imported",
"in",
"local",
"node",
"or",
"that",
"remote",
"API",
"knows",
"about",
"P2TH",
"address",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L57-L78 | train |
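A usage sketch, assuming pypeerassets is installed, that the provider package exports Explorer as the records further below suggest, and that 'peercoin' is a valid network name; the loop performs live lookups:

from pypeerassets.provider import Explorer
from pypeerassets.pautils import find_deck_spawns

provider = Explorer(network='peercoin')     # network name is an assumption
for txid in find_deck_spawns(provider, prod=True):
    print(txid)  # lazily yielded deck spawn txids
    break        # peek at the first one only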
PeerAssets/pypeerassets | pypeerassets/pautils.py | deck_parser | def deck_parser(args: Tuple[Provider, dict, int, str],
prod: bool=True) -> Optional[Deck]:
'''deck parser function'''
provider = args[0]
raw_tx = args[1]
deck_version = args[2]
p2th = args[3]
try:
validate_deckspawn_p2th(provider, raw_tx, p2th)
d = parse_deckspawn_metainfo(read_tx_opreturn(raw_tx['vout'][1]),
deck_version)
if d:
d["id"] = raw_tx["txid"]
try:
d["issue_time"] = raw_tx["blocktime"]
except KeyError:
d["time"] = 0
d["issuer"] = find_tx_sender(provider, raw_tx)
d["network"] = provider.network
d["production"] = prod
d["tx_confirmations"] = raw_tx["confirmations"]
return Deck(**d)
except (InvalidDeckSpawn, InvalidDeckMetainfo, InvalidDeckVersion,
InvalidNulldataOutput) as err:
pass
return None | python | def deck_parser(args: Tuple[Provider, dict, int, str],
prod: bool=True) -> Optional[Deck]:
'''deck parser function'''
provider = args[0]
raw_tx = args[1]
deck_version = args[2]
p2th = args[3]
try:
validate_deckspawn_p2th(provider, raw_tx, p2th)
d = parse_deckspawn_metainfo(read_tx_opreturn(raw_tx['vout'][1]),
deck_version)
if d:
d["id"] = raw_tx["txid"]
try:
d["issue_time"] = raw_tx["blocktime"]
except KeyError:
d["time"] = 0
d["issuer"] = find_tx_sender(provider, raw_tx)
d["network"] = provider.network
d["production"] = prod
d["tx_confirmations"] = raw_tx["confirmations"]
return Deck(**d)
except (InvalidDeckSpawn, InvalidDeckMetainfo, InvalidDeckVersion,
InvalidNulldataOutput) as err:
pass
return None | [
"def",
"deck_parser",
"(",
"args",
":",
"Tuple",
"[",
"Provider",
",",
"dict",
",",
"int",
",",
"str",
"]",
",",
"prod",
":",
"bool",
"=",
"True",
")",
"->",
"Optional",
"[",
"Deck",
"]",
":",
"provider",
"=",
"args",
"[",
"0",
"]",
"raw_tx",
"=",
"args",
"[",
"1",
"]",
"deck_version",
"=",
"args",
"[",
"2",
"]",
"p2th",
"=",
"args",
"[",
"3",
"]",
"try",
":",
"validate_deckspawn_p2th",
"(",
"provider",
",",
"raw_tx",
",",
"p2th",
")",
"d",
"=",
"parse_deckspawn_metainfo",
"(",
"read_tx_opreturn",
"(",
"raw_tx",
"[",
"'vout'",
"]",
"[",
"1",
"]",
")",
",",
"deck_version",
")",
"if",
"d",
":",
"d",
"[",
"\"id\"",
"]",
"=",
"raw_tx",
"[",
"\"txid\"",
"]",
"try",
":",
"d",
"[",
"\"issue_time\"",
"]",
"=",
"raw_tx",
"[",
"\"blocktime\"",
"]",
"except",
"KeyError",
":",
"d",
"[",
"\"time\"",
"]",
"=",
"0",
"d",
"[",
"\"issuer\"",
"]",
"=",
"find_tx_sender",
"(",
"provider",
",",
"raw_tx",
")",
"d",
"[",
"\"network\"",
"]",
"=",
"provider",
".",
"network",
"d",
"[",
"\"production\"",
"]",
"=",
"prod",
"d",
"[",
"\"tx_confirmations\"",
"]",
"=",
"raw_tx",
"[",
"\"confirmations\"",
"]",
"return",
"Deck",
"(",
"*",
"*",
"d",
")",
"except",
"(",
"InvalidDeckSpawn",
",",
"InvalidDeckMetainfo",
",",
"InvalidDeckVersion",
",",
"InvalidNulldataOutput",
")",
"as",
"err",
":",
"pass",
"return",
"None"
] | deck parser function | [
"deck",
"parser",
"function"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L81-L113 | train |
PeerAssets/pypeerassets | pypeerassets/pautils.py | tx_serialization_order | def tx_serialization_order(provider: Provider, blockhash: str, txid: str) -> int:
'''find index of this tx in the block'''
return provider.getblock(blockhash)["tx"].index(txid) | python | def tx_serialization_order(provider: Provider, blockhash: str, txid: str) -> int:
'''find index of this tx in the block'''
return provider.getblock(blockhash)["tx"].index(txid) | [
"def",
"tx_serialization_order",
"(",
"provider",
":",
"Provider",
",",
"blockhash",
":",
"str",
",",
"txid",
":",
"str",
")",
"->",
"int",
":",
"return",
"provider",
".",
"getblock",
"(",
"blockhash",
")",
"[",
"\"tx\"",
"]",
".",
"index",
"(",
"txid",
")"
] | find index of this tx in the block | [
"find",
"index",
"of",
"this",
"tx",
"in",
"the",
"blockid"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L116-L119 | train |
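The lookup reduces to list.index on the block's tx array; a standalone illustration with a fabricated block payload:

block = {"tx": ["aaa111", "bbb222", "ccc333"]}  # fabricated getblock result

def tx_index_in_block(block, txid):
    # same list.index lookup the function above runs on provider.getblock(...)
    return block["tx"].index(txid)

assert tx_index_in_block(block, "bbb222") == 1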
PeerAssets/pypeerassets | pypeerassets/pautils.py | deck_issue_mode | def deck_issue_mode(proto: DeckSpawnProto) -> Iterable[str]:
'''interpret issue mode bitfield'''
if proto.issue_mode == 0:
yield "NONE"
return
for mode, value in proto.MODE.items():
if value > proto.issue_mode:
continue
if value & proto.issue_mode:
yield mode | python | def deck_issue_mode(proto: DeckSpawnProto) -> Iterable[str]:
'''interpret issue mode bitfield'''
if proto.issue_mode == 0:
yield "NONE"
return
for mode, value in proto.MODE.items():
if value > proto.issue_mode:
continue
if value & proto.issue_mode:
yield mode | [
"def",
"deck_issue_mode",
"(",
"proto",
":",
"DeckSpawnProto",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"if",
"proto",
".",
"issue_mode",
"==",
"0",
":",
"yield",
"\"NONE\"",
"return",
"for",
"mode",
",",
"value",
"in",
"proto",
".",
"MODE",
".",
"items",
"(",
")",
":",
"if",
"value",
">",
"proto",
".",
"issue_mode",
":",
"continue",
"if",
"value",
"&",
"proto",
".",
"issue_mode",
":",
"yield",
"mode"
] | interpret issue mode bitfield | [
"interpret",
"issue",
"mode",
"bitfeg"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L141-L152 | train |
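A standalone sketch of the bitfield interpretation; the MODE names and flag values below are illustrative stand-ins for the protobuf definition, which may differ:

MODE = {"CUSTOM": 1, "ONCE": 2, "MULTI": 4, "MONO": 8}  # illustrative flags

def issue_modes(issue_mode):
    if issue_mode == 0:
        yield "NONE"
        return
    for mode, value in MODE.items():
        if value > issue_mode:
            continue
        if value & issue_mode:  # flag is set in the bitfield
            yield mode

assert list(issue_modes(0)) == ["NONE"]
assert set(issue_modes(6)) == {"ONCE", "MULTI"}  # 6 == 2 | 4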
PeerAssets/pypeerassets | pypeerassets/pautils.py | parse_deckspawn_metainfo | def parse_deckspawn_metainfo(protobuf: bytes, version: int) -> dict:
'''Decode deck_spawn tx op_return protobuf message and validate it,
Raise error if deck_spawn metainfo incomplete or version mismatch.'''
deck = DeckSpawnProto()
deck.ParseFromString(protobuf)
error = {"error": "Deck ({deck}) metainfo incomplete, deck must have a name.".format(deck=deck.name)}
if deck.name == "":
raise InvalidDeckMetainfo(error)
if deck.version != version:
raise InvalidDeckVersion({"error", "Deck version mismatch."})
return {
"version": deck.version,
"name": deck.name,
"issue_mode": deck.issue_mode,
"number_of_decimals": deck.number_of_decimals,
"asset_specific_data": deck.asset_specific_data
} | python | def parse_deckspawn_metainfo(protobuf: bytes, version: int) -> dict:
'''Decode deck_spawn tx op_return protobuf message and validate it,
Raise error if deck_spawn metainfo incomplete or version mismatch.'''
deck = DeckSpawnProto()
deck.ParseFromString(protobuf)
error = {"error": "Deck ({deck}) metainfo incomplete, deck must have a name.".format(deck=deck.name)}
if deck.name == "":
raise InvalidDeckMetainfo(error)
if deck.version != version:
raise InvalidDeckVersion({"error", "Deck version mismatch."})
return {
"version": deck.version,
"name": deck.name,
"issue_mode": deck.issue_mode,
"number_of_decimals": deck.number_of_decimals,
"asset_specific_data": deck.asset_specific_data
} | [
"def",
"parse_deckspawn_metainfo",
"(",
"protobuf",
":",
"bytes",
",",
"version",
":",
"int",
")",
"->",
"dict",
":",
"deck",
"=",
"DeckSpawnProto",
"(",
")",
"deck",
".",
"ParseFromString",
"(",
"protobuf",
")",
"error",
"=",
"{",
"\"error\"",
":",
"\"Deck ({deck}) metainfo incomplete, deck must have a name.\"",
".",
"format",
"(",
"deck",
"=",
"deck",
".",
"name",
")",
"}",
"if",
"deck",
".",
"name",
"==",
"\"\"",
":",
"raise",
"InvalidDeckMetainfo",
"(",
"error",
")",
"if",
"deck",
".",
"version",
"!=",
"version",
":",
"raise",
"InvalidDeckVersion",
"(",
"{",
"\"error\"",
",",
"\"Deck version mismatch.\"",
"}",
")",
"return",
"{",
"\"version\"",
":",
"deck",
".",
"version",
",",
"\"name\"",
":",
"deck",
".",
"name",
",",
"\"issue_mode\"",
":",
"deck",
".",
"issue_mode",
",",
"\"number_of_decimals\"",
":",
"deck",
".",
"number_of_decimals",
",",
"\"asset_specific_data\"",
":",
"deck",
".",
"asset_specific_data",
"}"
] | Decode deck_spawn tx op_return protobuf message and validate it,
Raise error if deck_spawn metainfo incomplete or version mismatch. | [
"Decode",
"deck_spawn",
"tx",
"op_return",
"protobuf",
"message",
"and",
"validate",
"it",
"Raise",
"error",
"if",
"deck_spawn",
"metainfo",
"incomplete",
"or",
"version",
"mistmatch",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L172-L193 | train |
PeerAssets/pypeerassets | pypeerassets/pautils.py | load_deck_p2th_into_local_node | def load_deck_p2th_into_local_node(provider: RpcNode, deck: Deck) -> None:
'''
load deck p2th into local node via "importprivkey",
this allows building of proof-of-timeline for this deck
'''
assert isinstance(provider, RpcNode), {"error": "You can load privkeys only into local node."}
error = {"error": "Deck P2TH import went wrong."}
provider.importprivkey(deck.p2th_wif, deck.id)
check_addr = provider.validateaddress(deck.p2th_address)
if not check_addr["isvalid"] and not check_addr["ismine"]:
raise DeckP2THImportError(error) | python | def load_deck_p2th_into_local_node(provider: RpcNode, deck: Deck) -> None:
'''
load deck p2th into local node via "importprivkey",
this allows building of proof-of-timeline for this deck
'''
assert isinstance(provider, RpcNode), {"error": "You can load privkeys only into local node."}
error = {"error": "Deck P2TH import went wrong."}
provider.importprivkey(deck.p2th_wif, deck.id)
check_addr = provider.validateaddress(deck.p2th_address)
if not check_addr["isvalid"] and not check_addr["ismine"]:
raise DeckP2THImportError(error) | [
"def",
"load_deck_p2th_into_local_node",
"(",
"provider",
":",
"RpcNode",
",",
"deck",
":",
"Deck",
")",
"->",
"None",
":",
"assert",
"isinstance",
"(",
"provider",
",",
"RpcNode",
")",
",",
"{",
"\"error\"",
":",
"\"You can load privkeys only into local node.\"",
"}",
"error",
"=",
"{",
"\"error\"",
":",
"\"Deck P2TH import went wrong.\"",
"}",
"provider",
".",
"importprivkey",
"(",
"deck",
".",
"p2th_wif",
",",
"deck",
".",
"id",
")",
"check_addr",
"=",
"provider",
".",
"validateaddress",
"(",
"deck",
".",
"p2th_address",
")",
"if",
"not",
"check_addr",
"[",
"\"isvalid\"",
"]",
"and",
"not",
"check_addr",
"[",
"\"ismine\"",
"]",
":",
"raise",
"DeckP2THImportError",
"(",
"error",
")"
] | load deck p2th into local node via "importprivkey",
this allows building of proof-of-timeline for this deck | [
"load",
"deck",
"p2th",
"into",
"local",
"node",
"via",
"importprivke",
"this",
"allows",
"building",
"of",
"proof",
"-",
"of",
"-",
"timeline",
"for",
"this",
"deck"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L213-L226 | train |
PeerAssets/pypeerassets | pypeerassets/pautils.py | card_bundle_parser | def card_bundle_parser(bundle: CardBundle, debug=False) -> Iterator:
'''this function wraps all the card transfer parsing'''
try:
# first vout of the bundle must pay to deck.p2th
validate_card_transfer_p2th(bundle.deck, bundle.vouts[0])
# second vout must be OP_RETURN with card_metainfo
card_metainfo = parse_card_transfer_metainfo(
read_tx_opreturn(bundle.vouts[1]),
bundle.deck.version
)
# if any of these exceptions is raised, return None
except (InvalidCardTransferP2TH,
CardVersionMismatch,
CardNumberOfDecimalsMismatch,
RecieverAmountMismatch,
DecodeError,
TypeError,
InvalidNulldataOutput) as e:
if debug:
print(e) # re-do as logging later on
return
yield
# check for decimals
if not card_metainfo["number_of_decimals"] == bundle.deck.number_of_decimals:
raise CardNumberOfDecimalsMismatch(
{"error": "Number of decimals does not match."}
)
# deduce the individual cards in the bundle
cards = card_postprocess(card_metainfo, bundle.vouts)
# drop the vouts property
del bundle.__dict__['vouts']
for c in cards:
d = {**c, **bundle.__dict__}
try:
yield CardTransfer(**d)
# issuing cards to issuing address is forbidden,
# this will except the error
except InvalidCardIssue as e:
if debug:
print(e) | python | def card_bundle_parser(bundle: CardBundle, debug=False) -> Iterator:
'''this function wraps all the card transfer parsing'''
try:
# first vout of the bundle must pay to deck.p2th
validate_card_transfer_p2th(bundle.deck, bundle.vouts[0])
# second vout must be OP_RETURN with card_metainfo
card_metainfo = parse_card_transfer_metainfo(
read_tx_opreturn(bundle.vouts[1]),
bundle.deck.version
)
# if any of these exceptions is raised, return None
except (InvalidCardTransferP2TH,
CardVersionMismatch,
CardNumberOfDecimalsMismatch,
RecieverAmountMismatch,
DecodeError,
TypeError,
InvalidNulldataOutput) as e:
if debug:
print(e) # re-do as logging later on
return
yield
# check for decimals
if not card_metainfo["number_of_decimals"] == bundle.deck.number_of_decimals:
raise CardNumberOfDecimalsMismatch(
{"error": "Number of decimals does not match."}
)
# deduce the individual cards in the bundle
cards = card_postprocess(card_metainfo, bundle.vouts)
# drop the vouts property
del bundle.__dict__['vouts']
for c in cards:
d = {**c, **bundle.__dict__}
try:
yield CardTransfer(**d)
# issuing cards to issuing address is forbidden,
# this will except the error
except InvalidCardIssue as e:
if debug:
print(e) | [
"def",
"card_bundle_parser",
"(",
"bundle",
":",
"CardBundle",
",",
"debug",
"=",
"False",
")",
"->",
"Iterator",
":",
"try",
":",
"# first vout of the bundle must pay to deck.p2th",
"validate_card_transfer_p2th",
"(",
"bundle",
".",
"deck",
",",
"bundle",
".",
"vouts",
"[",
"0",
"]",
")",
"# second vout must be OP_RETURN with card_metainfo",
"card_metainfo",
"=",
"parse_card_transfer_metainfo",
"(",
"read_tx_opreturn",
"(",
"bundle",
".",
"vouts",
"[",
"1",
"]",
")",
",",
"bundle",
".",
"deck",
".",
"version",
")",
"# if any of this exceptions is raised, return None",
"except",
"(",
"InvalidCardTransferP2TH",
",",
"CardVersionMismatch",
",",
"CardNumberOfDecimalsMismatch",
",",
"RecieverAmountMismatch",
",",
"DecodeError",
",",
"TypeError",
",",
"InvalidNulldataOutput",
")",
"as",
"e",
":",
"if",
"debug",
":",
"print",
"(",
"e",
")",
"# re-do as logging later on",
"return",
"yield",
"# check for decimals",
"if",
"not",
"card_metainfo",
"[",
"\"number_of_decimals\"",
"]",
"==",
"bundle",
".",
"deck",
".",
"number_of_decimals",
":",
"raise",
"CardNumberOfDecimalsMismatch",
"(",
"{",
"\"error\"",
":",
"\"Number of decimals does not match.\"",
"}",
")",
"# deduce the individual cards in the bundle",
"cards",
"=",
"card_postprocess",
"(",
"card_metainfo",
",",
"bundle",
".",
"vouts",
")",
"# drop the vouts property",
"del",
"bundle",
".",
"__dict__",
"[",
"'vouts'",
"]",
"for",
"c",
"in",
"cards",
":",
"d",
"=",
"{",
"*",
"*",
"c",
",",
"*",
"*",
"bundle",
".",
"__dict__",
"}",
"try",
":",
"yield",
"CardTransfer",
"(",
"*",
"*",
"d",
")",
"# issuing cards to issuing address is forbidden,",
"# this will except the error",
"except",
"InvalidCardIssue",
"as",
"e",
":",
"if",
"debug",
":",
"print",
"(",
"e",
")"
] | this function wraps all the card transfer parsing | [
"this",
"function",
"wraps",
"all",
"the",
"card",
"transfer",
"parsing"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L283-L333 | train |
PeerAssets/pypeerassets | pypeerassets/pa_constants.py | param_query | def param_query(name: str) -> PAParams:
'''Find the PAParams for a network by its long or short name. Raises
UnsupportedNetwork if no PAParams is found.
'''
for pa_params in params:
if name in (pa_params.network_name, pa_params.network_shortname,):
return pa_params
raise UnsupportedNetwork | python | def param_query(name: str) -> PAParams:
'''Find the PAParams for a network by its long or short name. Raises
UnsupportedNetwork if no PAParams is found.
'''
for pa_params in params:
if name in (pa_params.network_name, pa_params.network_shortname,):
return pa_params
raise UnsupportedNetwork | [
"def",
"param_query",
"(",
"name",
":",
"str",
")",
"->",
"PAParams",
":",
"for",
"pa_params",
"in",
"params",
":",
"if",
"name",
"in",
"(",
"pa_params",
".",
"network_name",
",",
"pa_params",
".",
"network_shortname",
",",
")",
":",
"return",
"pa_params",
"raise",
"UnsupportedNetwork"
] | Find the PAParams for a network by its long or short name. Raises
UnsupportedNetwork if no PAParams is found. | [
"Find",
"the",
"PAParams",
"for",
"a",
"network",
"by",
"its",
"long",
"or",
"short",
"name",
".",
"Raises",
"UnsupportedNetwork",
"if",
"no",
"PAParams",
"is",
"found",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pa_constants.py#L33-L42 | train |
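Usage sketch, assuming pypeerassets is installed and that 'peercoin'/'ppc' are a registered long/short name pair in this module's params table:

from pypeerassets.pa_constants import param_query

long_form = param_query('peercoin')
short_form = param_query('ppc')
assert long_form is short_form  # both names resolve to the same PAParams entry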
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/gui/qt_b26_gui.py | ControlMainWindow.load_scripts | def load_scripts(self):
"""
opens file dialog to load scripts into gui
"""
# update scripts so that current settings do not get lost
for index in range(self.tree_scripts.topLevelItemCount()):
script_item = self.tree_scripts.topLevelItem(index)
self.update_script_from_item(script_item)
dialog = LoadDialog(elements_type="scripts", elements_old=self.scripts,
filename=self.gui_settings['scripts_folder'])
if dialog.exec_():
self.gui_settings['scripts_folder'] = str(dialog.txt_probe_log_path.text())
scripts = dialog.get_values()
added_scripts = set(scripts.keys()) - set(self.scripts.keys())
removed_scripts = set(self.scripts.keys()) - set(scripts.keys())
if 'data_folder' in list(self.gui_settings.keys()) and os.path.exists(self.gui_settings['data_folder']):
data_folder_name = self.gui_settings['data_folder']
else:
data_folder_name = None
# create instances of new instruments/scripts
self.scripts, loaded_failed, self.instruments = Script.load_and_append(
script_dict={name: scripts[name] for name in added_scripts},
scripts=self.scripts,
instruments=self.instruments,
log_function=self.log,
data_path=data_folder_name)
# delete instances of new instruments/scripts that have been deselected
for name in removed_scripts:
del self.scripts[name] | python | def load_scripts(self):
"""
opens file dialog to load scripts into gui
"""
# update scripts so that current settings do not get lost
for index in range(self.tree_scripts.topLevelItemCount()):
script_item = self.tree_scripts.topLevelItem(index)
self.update_script_from_item(script_item)
dialog = LoadDialog(elements_type="scripts", elements_old=self.scripts,
filename=self.gui_settings['scripts_folder'])
if dialog.exec_():
self.gui_settings['scripts_folder'] = str(dialog.txt_probe_log_path.text())
scripts = dialog.get_values()
added_scripts = set(scripts.keys()) - set(self.scripts.keys())
removed_scripts = set(self.scripts.keys()) - set(scripts.keys())
if 'data_folder' in list(self.gui_settings.keys()) and os.path.exists(self.gui_settings['data_folder']):
data_folder_name = self.gui_settings['data_folder']
else:
data_folder_name = None
# create instances of new instruments/scripts
self.scripts, loaded_failed, self.instruments = Script.load_and_append(
script_dict={name: scripts[name] for name in added_scripts},
scripts=self.scripts,
instruments=self.instruments,
log_function=self.log,
data_path=data_folder_name)
# delete instances of new instruments/scripts that have been deselected
for name in removed_scripts:
del self.scripts[name] | [
"def",
"load_scripts",
"(",
"self",
")",
":",
"# update scripts so that current settings do not get lost",
"for",
"index",
"in",
"range",
"(",
"self",
".",
"tree_scripts",
".",
"topLevelItemCount",
"(",
")",
")",
":",
"script_item",
"=",
"self",
".",
"tree_scripts",
".",
"topLevelItem",
"(",
"index",
")",
"self",
".",
"update_script_from_item",
"(",
"script_item",
")",
"dialog",
"=",
"LoadDialog",
"(",
"elements_type",
"=",
"\"scripts\"",
",",
"elements_old",
"=",
"self",
".",
"scripts",
",",
"filename",
"=",
"self",
".",
"gui_settings",
"[",
"'scripts_folder'",
"]",
")",
"if",
"dialog",
".",
"exec_",
"(",
")",
":",
"self",
".",
"gui_settings",
"[",
"'scripts_folder'",
"]",
"=",
"str",
"(",
"dialog",
".",
"txt_probe_log_path",
".",
"text",
"(",
")",
")",
"scripts",
"=",
"dialog",
".",
"get_values",
"(",
")",
"added_scripts",
"=",
"set",
"(",
"scripts",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"self",
".",
"scripts",
".",
"keys",
"(",
")",
")",
"removed_scripts",
"=",
"set",
"(",
"self",
".",
"scripts",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"scripts",
".",
"keys",
"(",
")",
")",
"if",
"'data_folder'",
"in",
"list",
"(",
"self",
".",
"gui_settings",
".",
"keys",
"(",
")",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"gui_settings",
"[",
"'data_folder'",
"]",
")",
":",
"data_folder_name",
"=",
"self",
".",
"gui_settings",
"[",
"'data_folder'",
"]",
"else",
":",
"data_folder_name",
"=",
"None",
"# create instances of new instruments/scripts",
"self",
".",
"scripts",
",",
"loaded_failed",
",",
"self",
".",
"instruments",
"=",
"Script",
".",
"load_and_append",
"(",
"script_dict",
"=",
"{",
"name",
":",
"scripts",
"[",
"name",
"]",
"for",
"name",
"in",
"added_scripts",
"}",
",",
"scripts",
"=",
"self",
".",
"scripts",
",",
"instruments",
"=",
"self",
".",
"instruments",
",",
"log_function",
"=",
"self",
".",
"log",
",",
"data_path",
"=",
"data_folder_name",
")",
"# delete instances of new instruments/scripts that have been deselected",
"for",
"name",
"in",
"removed_scripts",
":",
"del",
"self",
".",
"scripts",
"[",
"name",
"]"
] | opens file dialog to load scripts into gui | [
"opens",
"file",
"dialog",
"to",
"load",
"scripts",
"into",
"gui"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_gui.py#L523-L558 | train |
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.getblockhash | def getblockhash(self, index: int) -> str:
'''Returns the hash of the block at the given index; index 0 is the genesis block.'''
return cast(str, self.api_fetch('getblockhash?index=' + str(index))) | python | def getblockhash(self, index: int) -> str:
'''Returns the hash of the block at the given index; index 0 is the genesis block.'''
return cast(str, self.api_fetch('getblockhash?index=' + str(index))) | [
"def",
"getblockhash",
"(",
"self",
",",
"index",
":",
"int",
")",
"->",
"str",
":",
"return",
"cast",
"(",
"str",
",",
"self",
".",
"api_fetch",
"(",
"'getblockhash?index='",
"+",
"str",
"(",
"index",
")",
")",
")"
] | Returns the hash of the block at the given index; index 0 is the genesis block. | [
"Returns",
"the",
"hash",
"of",
"the",
"block",
"at",
";",
"index",
"0",
"is",
"the",
"genesis",
"block",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L73-L76 | train |
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.getblock | def getblock(self, hash: str) -> dict:
'''Returns information about the block with the given hash.'''
return cast(dict, self.api_fetch('getblock?hash=' + hash)) | python | def getblock(self, hash: str) -> dict:
'''Returns information about the block with the given hash.'''
return cast(dict, self.api_fetch('getblock?hash=' + hash)) | [
"def",
"getblock",
"(",
"self",
",",
"hash",
":",
"str",
")",
"->",
"dict",
":",
"return",
"cast",
"(",
"dict",
",",
"self",
".",
"api_fetch",
"(",
"'getblock?hash='",
"+",
"hash",
")",
")"
] | Returns information about the block with the given hash. | [
"Returns",
"information",
"about",
"the",
"block",
"with",
"the",
"given",
"hash",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L78-L81 | train |
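A combined usage sketch for the two lookups above; it performs live HTTP requests and assumes the Explorer constructor takes a network name:

from pypeerassets.provider import Explorer

provider = Explorer(network='peercoin')   # network name is an assumption
genesis_hash = provider.getblockhash(0)   # index 0 is the genesis block
genesis = provider.getblock(genesis_hash)
print(genesis.get('height'), genesis_hash)  # field names depend on the explorer API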
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.getaddress | def getaddress(self, address: str) -> dict:
'''Returns information for given address.'''
return cast(dict, self.ext_fetch('getaddress/' + address)) | python | def getaddress(self, address: str) -> dict:
'''Returns information for given address.'''
return cast(dict, self.ext_fetch('getaddress/' + address)) | [
"def",
"getaddress",
"(",
"self",
",",
"address",
":",
"str",
")",
"->",
"dict",
":",
"return",
"cast",
"(",
"dict",
",",
"self",
".",
"ext_fetch",
"(",
"'getaddress/'",
"+",
"address",
")",
")"
] | Returns information for given address. | [
"Returns",
"information",
"for",
"given",
"address",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L106-L109 | train |
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.listunspent | def listunspent(self, address: str) -> list:
'''Returns unspent transactions for given address.'''
try:
return cast(dict, self.ext_fetch('listunspent/' + address))['unspent_outputs']
except KeyError:
raise InsufficientFunds('Insufficient funds.') | python | def listunspent(self, address: str) -> list:
'''Returns unspent transactions for given address.'''
try:
return cast(dict, self.ext_fetch('listunspent/' + address))['unspent_outputs']
except KeyError:
raise InsufficientFunds('Insufficient funds.') | [
"def",
"listunspent",
"(",
"self",
",",
"address",
":",
"str",
")",
"->",
"list",
":",
"try",
":",
"return",
"cast",
"(",
"dict",
",",
"self",
".",
"ext_fetch",
"(",
"'listunspent/'",
"+",
"address",
")",
")",
"[",
"'unspent_outputs'",
"]",
"except",
"KeyError",
":",
"raise",
"InsufficientFunds",
"(",
"'Insufficient funds.'",
")"
] | Returns unspent transactions for given address. | [
"Returns",
"unspent",
"transactions",
"for",
"given",
"address",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L111-L117 | train |
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.txinfo | def txinfo(self, txid: str) -> dict:
'''Returns information about given transaction.'''
return cast(dict, self.ext_fetch('txinfo/' + txid)) | python | def txinfo(self, txid: str) -> dict:
'''Returns information about given transaction.'''
return cast(dict, self.ext_fetch('txinfo/' + txid)) | [
"def",
"txinfo",
"(",
"self",
",",
"txid",
":",
"str",
")",
"->",
"dict",
":",
"return",
"cast",
"(",
"dict",
",",
"self",
".",
"ext_fetch",
"(",
"'txinfo/'",
"+",
"txid",
")",
")"
] | Returns information about given transaction. | [
"Returns",
"information",
"about",
"given",
"transaction",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L141-L144 | train |
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | Explorer.getbalance | def getbalance(self, address: str) -> Decimal:
'''Returns current balance of given address.'''
try:
return Decimal(cast(float, self.ext_fetch('getbalance/' + address)))
except TypeError:
return Decimal(0) | python | def getbalance(self, address: str) -> Decimal:
'''Returns current balance of given address.'''
try:
return Decimal(cast(float, self.ext_fetch('getbalance/' + address)))
except TypeError:
return Decimal(0) | [
"def",
"getbalance",
"(",
"self",
",",
"address",
":",
"str",
")",
"->",
"Decimal",
":",
"try",
":",
"return",
"Decimal",
"(",
"cast",
"(",
"float",
",",
"self",
".",
"ext_fetch",
"(",
"'getbalance/'",
"+",
"address",
")",
")",
")",
"except",
"TypeError",
":",
"return",
"Decimal",
"(",
"0",
")"
] | Returns current balance of given address. | [
"Returns",
"current",
"balance",
"of",
"given",
"address",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L146-L152 | train |
johnnoone/json-spec | src/jsonspec/pointer/bases.py | DocumentPointer.extract | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to pointer.
It assumes that the document is the object.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
return self.pointer.extract(obj, bypass_ref) | python | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to pointer.
It assumes that the document is the object.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
return self.pointer.extract(obj, bypass_ref) | [
"def",
"extract",
"(",
"self",
",",
"obj",
",",
"bypass_ref",
"=",
"False",
")",
":",
"return",
"self",
".",
"pointer",
".",
"extract",
"(",
"obj",
",",
"bypass_ref",
")"
] | Extract subelement from obj, according to pointer.
It assumes that the document is the object.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors | [
"Extract",
"subelement",
"from",
"obj",
"according",
"to",
"pointer",
".",
"It",
"assums",
"that",
"document",
"is",
"the",
"object",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L40-L48 | train |
johnnoone/json-spec | src/jsonspec/pointer/bases.py | Pointer.parse | def parse(self, pointer):
"""parse pointer into tokens"""
if isinstance(pointer, Pointer):
return pointer.tokens[:]
elif pointer == '':
return []
tokens = []
staged, _, children = pointer.partition('/')
if staged:
try:
token = StagesToken(staged)
token.last = False
tokens.append(token)
except ValueError:
raise ParseError('pointer must start with / or int', pointer)
if _:
for part in children.split('/'):
part = part.replace('~1', '/')
part = part.replace('~0', '~')
token = ChildToken(part)
token.last = False
tokens.append(token)
return tokens | python | def parse(self, pointer):
"""parse pointer into tokens"""
if isinstance(pointer, Pointer):
return pointer.tokens[:]
elif pointer == '':
return []
tokens = []
staged, _, children = pointer.partition('/')
if staged:
try:
token = StagesToken(staged)
token.last = False
tokens.append(token)
except ValueError:
raise ParseError('pointer must start with / or int', pointer)
if _:
for part in children.split('/'):
part = part.replace('~1', '/')
part = part.replace('~0', '~')
token = ChildToken(part)
token.last = False
tokens.append(token)
return tokens | [
"def",
"parse",
"(",
"self",
",",
"pointer",
")",
":",
"if",
"isinstance",
"(",
"pointer",
",",
"Pointer",
")",
":",
"return",
"pointer",
".",
"tokens",
"[",
":",
"]",
"elif",
"pointer",
"==",
"''",
":",
"return",
"[",
"]",
"tokens",
"=",
"[",
"]",
"staged",
",",
"_",
",",
"children",
"=",
"pointer",
".",
"partition",
"(",
"'/'",
")",
"if",
"staged",
":",
"try",
":",
"token",
"=",
"StagesToken",
"(",
"staged",
")",
"token",
".",
"last",
"=",
"False",
"tokens",
".",
"append",
"(",
"token",
")",
"except",
"ValueError",
":",
"raise",
"ParseError",
"(",
"'pointer must start with / or int'",
",",
"pointer",
")",
"if",
"_",
":",
"for",
"part",
"in",
"children",
".",
"split",
"(",
"'/'",
")",
":",
"part",
"=",
"part",
".",
"replace",
"(",
"'~1'",
",",
"'/'",
")",
"part",
"=",
"part",
".",
"replace",
"(",
"'~0'",
",",
"'~'",
")",
"token",
"=",
"ChildToken",
"(",
"part",
")",
"token",
".",
"last",
"=",
"False",
"tokens",
".",
"append",
"(",
"token",
")",
"return",
"tokens"
] | parse pointer into tokens | [
"parse",
"pointer",
"into",
"tokens"
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L96-L121 | train |
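The '~1'/'~0' replacements above implement RFC 6901 unescaping, in that order so a literal tilde in a member name survives; a standalone illustration:

def unescape(part):
    # per RFC 6901: '~1' -> '/' first, then '~0' -> '~'
    return part.replace('~1', '/').replace('~0', '~')

pointer = '/a~1b/m~0n'
parts = [unescape(p) for p in pointer.split('/')[1:]]
assert parts == ['a/b', 'm~n']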
johnnoone/json-spec | src/jsonspec/pointer/bases.py | Pointer.extract | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to tokens.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
for token in self.tokens:
obj = token.extract(obj, bypass_ref)
return obj | python | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to tokens.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
for token in self.tokens:
obj = token.extract(obj, bypass_ref)
return obj | [
"def",
"extract",
"(",
"self",
",",
"obj",
",",
"bypass_ref",
"=",
"False",
")",
":",
"for",
"token",
"in",
"self",
".",
"tokens",
":",
"obj",
"=",
"token",
".",
"extract",
"(",
"obj",
",",
"bypass_ref",
")",
"return",
"obj"
] | Extract subelement from obj, according to tokens.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors | [
"Extract",
"subelement",
"from",
"obj",
"according",
"to",
"tokens",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L123-L132 | train |
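A sketch of the token-by-token walk performed by extract(), with a hypothetical document:

from jsonspec.pointer import Pointer

obj = {'users': [{'name': 'alice'}, {'name': 'bob'}]}
# each token narrows obj: 'users' -> list, '1' -> dict, 'name' -> 'bob'
assert Pointer('/users/1/name').extract(obj) == 'bob'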
johnnoone/json-spec | src/jsonspec/pointer/bases.py | StagesToken.extract | def extract(self, obj, bypass_ref=False):
"""
Extract parent of obj, according to current token.
:param obj: the object source
:param bypass_ref: not used
"""
for i in range(0, self.stages):
try:
obj = obj.parent_obj
except AttributeError:
raise UnstagedError(obj, '{!r} must be staged before '
'exploring its parents'.format(obj))
if self.member:
return obj.parent_member
return obj | python | def extract(self, obj, bypass_ref=False):
"""
Extract parent of obj, according to current token.
:param obj: the object source
:param bypass_ref: not used
"""
for i in range(0, self.stages):
try:
obj = obj.parent_obj
except AttributeError:
raise UnstagedError(obj, '{!r} must be staged before '
'exploring its parents'.format(obj))
if self.member:
return obj.parent_member
return obj | [
"def",
"extract",
"(",
"self",
",",
"obj",
",",
"bypass_ref",
"=",
"False",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"stages",
")",
":",
"try",
":",
"obj",
"=",
"obj",
".",
"parent_obj",
"except",
"AttributeError",
":",
"raise",
"UnstagedError",
"(",
"obj",
",",
"'{!r} must be staged before '",
"'exploring its parents'",
".",
"format",
"(",
"obj",
")",
")",
"if",
"self",
".",
"member",
":",
"return",
"obj",
".",
"parent_member",
"return",
"obj"
] | Extract parent of obj, according to current token.
:param obj: the object source
:param bypass_ref: not used | [
"Extract",
"parent",
"of",
"obj",
"according",
"to",
"current",
"token",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L190-L205 | train |
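A sketch of the failure path above; it assumes StagesToken and UnstagedError are importable from this module and that StagesToken accepts a bare stage count:

from jsonspec.pointer.bases import StagesToken, UnstagedError

token = StagesToken('1')  # "go up one parent"
try:
    token.extract({'plain': 'dict'})  # plain objects carry no parent_obj
except UnstagedError:
    pass  # only staged documents can be walked upwards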
johnnoone/json-spec | src/jsonspec/pointer/bases.py | ChildToken.extract | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to current token.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
try:
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member')
obj = self.extract_mapping(obj)
elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
obj = self.extract_sequence(obj)
else:
raise WrongType(obj, '{!r} does not apply '
'for {!r}'.format(str(self), obj))
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member')
return obj
except ExtractError as error:
logger.exception(error)
raise
except Exception as error:
logger.exception(error)
args = [arg for arg in error.args if arg not in (self, obj)]
raise ExtractError(obj, *args) | python | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to current token.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
try:
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member')
obj = self.extract_mapping(obj)
elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
obj = self.extract_sequence(obj)
else:
raise WrongType(obj, '{!r} does not apply '
'for {!r}'.format(str(self), obj))
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member')
return obj
except ExtractError as error:
logger.exception(error)
raise
except Exception as error:
logger.exception(error)
args = [arg for arg in error.args if arg not in (self, obj)]
raise ExtractError(obj, *args) | [
"def",
"extract",
"(",
"self",
",",
"obj",
",",
"bypass_ref",
"=",
"False",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Mapping",
")",
":",
"if",
"not",
"bypass_ref",
"and",
"'$ref'",
"in",
"obj",
":",
"raise",
"RefError",
"(",
"obj",
",",
"'presence of a $ref member'",
")",
"obj",
"=",
"self",
".",
"extract_mapping",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"Sequence",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"string_types",
")",
":",
"obj",
"=",
"self",
".",
"extract_sequence",
"(",
"obj",
")",
"else",
":",
"raise",
"WrongType",
"(",
"obj",
",",
"'{!r} does not apply '",
"'for {!r}'",
".",
"format",
"(",
"str",
"(",
"self",
")",
",",
"obj",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"Mapping",
")",
":",
"if",
"not",
"bypass_ref",
"and",
"'$ref'",
"in",
"obj",
":",
"raise",
"RefError",
"(",
"obj",
",",
"'presence of a $ref member'",
")",
"return",
"obj",
"except",
"ExtractError",
"as",
"error",
":",
"logger",
".",
"exception",
"(",
"error",
")",
"raise",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"exception",
"(",
"error",
")",
"args",
"=",
"[",
"arg",
"for",
"arg",
"in",
"error",
".",
"args",
"if",
"arg",
"not",
"in",
"(",
"self",
",",
"obj",
")",
"]",
"raise",
"ExtractError",
"(",
"obj",
",",
"*",
"args",
")"
] | Extract subelement from obj, according to current token.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors | [
"Extract",
"subelement",
"from",
"obj",
"according",
"to",
"current",
"token",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/bases.py#L212-L240 | train |
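A sketch of the $ref guard above, using a hypothetical document; bypass_ref=True suppresses the RefError raised when the resolved value contains a '$ref' member:

from jsonspec.pointer import Pointer

obj = {'definitions': {'item': {'$ref': '#/other'}}}
# without bypass_ref=True this raises RefError on the '$ref' member
ref = Pointer('/definitions/item').extract(obj, bypass_ref=True)
assert ref == {'$ref': '#/other'}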
samstav/requests-chef | requests_chef/mixlib_auth.py | digester | def digester(data):
"""Create SHA-1 hash, get digest, b64 encode, split every 60 char."""
if not isinstance(data, six.binary_type):
data = data.encode('utf_8')
hashof = hashlib.sha1(data).digest()
encoded_hash = base64.b64encode(hashof)
if not isinstance(encoded_hash, six.string_types):
encoded_hash = encoded_hash.decode('utf_8')
chunked = splitter(encoded_hash, chunksize=60)
lines = '\n'.join(chunked)
return lines | python | def digester(data):
"""Create SHA-1 hash, get digest, b64 encode, split every 60 char."""
if not isinstance(data, six.binary_type):
data = data.encode('utf_8')
hashof = hashlib.sha1(data).digest()
encoded_hash = base64.b64encode(hashof)
if not isinstance(encoded_hash, six.string_types):
encoded_hash = encoded_hash.decode('utf_8')
chunked = splitter(encoded_hash, chunksize=60)
lines = '\n'.join(chunked)
return lines | [
"def",
"digester",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"six",
".",
"binary_type",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"'utf_8'",
")",
"hashof",
"=",
"hashlib",
".",
"sha1",
"(",
"data",
")",
".",
"digest",
"(",
")",
"encoded_hash",
"=",
"base64",
".",
"b64encode",
"(",
"hashof",
")",
"if",
"not",
"isinstance",
"(",
"encoded_hash",
",",
"six",
".",
"string_types",
")",
":",
"encoded_hash",
"=",
"encoded_hash",
".",
"decode",
"(",
"'utf_8'",
")",
"chunked",
"=",
"splitter",
"(",
"encoded_hash",
",",
"chunksize",
"=",
"60",
")",
"lines",
"=",
"'\\n'",
".",
"join",
"(",
"chunked",
")",
"return",
"lines"
] | Create SHA-1 hash, get digest, b64 encode, split every 60 chars. | [
"Create",
"SHA",
"-",
"1",
"hash",
"get",
"digest",
"b64",
"encode",
"split",
"every",
"60",
"char",
"."
] | a0bf013b925abd0cf76eeaf6300cf32659632773 | https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L38-L48 | train |
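The same pipeline written inline, as a sketch of what digester() computes (Python 3 shown; the original targets six for 2/3 compatibility):

import base64
import hashlib

data = b'GET'
encoded = base64.b64encode(hashlib.sha1(data).digest()).decode('utf_8')
# base64 of a 20-byte SHA-1 digest is 28 chars, so short inputs stay on one line
lines = '\n'.join(encoded[i:i + 60] for i in range(0, len(encoded), 60))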
samstav/requests-chef | requests_chef/mixlib_auth.py | splitter | def splitter(iterable, chunksize=60):
"""Split an iterable that supports indexing into chunks of 'chunksize'."""
return (iterable[0+i:chunksize+i]
for i in range(0, len(iterable), chunksize)) | python | def splitter(iterable, chunksize=60):
"""Split an iterable that supports indexing into chunks of 'chunksize'."""
return (iterable[0+i:chunksize+i]
for i in range(0, len(iterable), chunksize)) | [
"def",
"splitter",
"(",
"iterable",
",",
"chunksize",
"=",
"60",
")",
":",
"return",
"(",
"iterable",
"[",
"0",
"+",
"i",
":",
"chunksize",
"+",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"iterable",
")",
",",
"chunksize",
")",
")"
] | Split an iterable that supports indexing into chunks of 'chunksize'. | [
"Split",
"an",
"iterable",
"that",
"supports",
"indexing",
"into",
"chunks",
"of",
"chunksize",
"."
] | a0bf013b925abd0cf76eeaf6300cf32659632773 | https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L60-L63 | train |
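splitter() returns a generator over any sequence that supports slicing; a quick illustrative call, assuming the definition above is in scope:

chunks = list(splitter('abcdefgh', chunksize=3))
# the final chunk may be shorter than chunksize
assert chunks == ['abc', 'def', 'gh']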
samstav/requests-chef | requests_chef/mixlib_auth.py | ChefAuth.canonical_request | def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()]) | python | def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()]) | [
"def",
"canonical_request",
"(",
"self",
",",
"method",
",",
"path",
",",
"content",
",",
"timestamp",
")",
":",
"request",
"=",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"'Method'",
",",
"method",
".",
"upper",
"(",
")",
")",
",",
"(",
"'Hashed Path'",
",",
"path",
")",
",",
"(",
"'X-Ops-Content-Hash'",
",",
"content",
")",
",",
"(",
"'X-Ops-Timestamp'",
",",
"timestamp",
")",
",",
"(",
"'X-Ops-UserId'",
",",
"self",
".",
"user_id",
")",
",",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"[",
"'%s:%s'",
"%",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"request",
".",
"items",
"(",
")",
"]",
")"
] | Return the canonical request string. | [
"Return",
"the",
"canonical",
"request",
"string",
"."
] | a0bf013b925abd0cf76eeaf6300cf32659632773 | https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L127-L137 | train |
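What the canonical string looks like on the wire, following the OrderedDict above; every value below is an illustrative placeholder, not a real digest:

canonical = '\n'.join([
    'Method:GET',
    'Hashed Path:sqW4oZ...',           # digester() of the request path
    'X-Ops-Content-Hash:2jmj7l...',    # digester() of the request body
    'X-Ops-Timestamp:2015-01-01T00:00:00Z',
    'X-Ops-UserId:my-client',
])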
samstav/requests-chef | requests_chef/mixlib_auth.py | RSAKey.load_pem | def load_pem(cls, private_key, password=None):
"""Return a PrivateKey instance.
:param private_key: Private key string (PEM format) or the path
to a local private key file.
"""
# TODO(sam): try to break this in tests
maybe_path = normpath(private_key)
if os.path.isfile(maybe_path):
with open(maybe_path, 'rb') as pkf:
private_key = pkf.read()
if not isinstance(private_key, six.binary_type):
private_key = private_key.encode('utf-8')
pkey = serialization.load_pem_private_key(
private_key,
password=password,
backend=crypto_backends.default_backend())
return cls(pkey) | python | def load_pem(cls, private_key, password=None):
"""Return a PrivateKey instance.
:param private_key: Private key string (PEM format) or the path
to a local private key file.
"""
# TODO(sam): try to break this in tests
maybe_path = normpath(private_key)
if os.path.isfile(maybe_path):
with open(maybe_path, 'rb') as pkf:
private_key = pkf.read()
if not isinstance(private_key, six.binary_type):
private_key = private_key.encode('utf-8')
pkey = serialization.load_pem_private_key(
private_key,
password=password,
backend=crypto_backends.default_backend())
return cls(pkey) | [
"def",
"load_pem",
"(",
"cls",
",",
"private_key",
",",
"password",
"=",
"None",
")",
":",
"# TODO(sam): try to break this in tests",
"maybe_path",
"=",
"normpath",
"(",
"private_key",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"maybe_path",
")",
":",
"with",
"open",
"(",
"maybe_path",
",",
"'rb'",
")",
"as",
"pkf",
":",
"private_key",
"=",
"pkf",
".",
"read",
"(",
")",
"if",
"not",
"isinstance",
"(",
"private_key",
",",
"six",
".",
"binary_type",
")",
":",
"private_key",
"=",
"private_key",
".",
"encode",
"(",
"'utf-8'",
")",
"pkey",
"=",
"serialization",
".",
"load_pem_private_key",
"(",
"private_key",
",",
"password",
"=",
"password",
",",
"backend",
"=",
"crypto_backends",
".",
"default_backend",
"(",
")",
")",
"return",
"cls",
"(",
"pkey",
")"
] | Return a PrivateKey instance.
:param private_key: Private key string (PEM format) or the path
to a local private key file. | [
"Return",
"a",
"PrivateKey",
"instance",
"."
] | a0bf013b925abd0cf76eeaf6300cf32659632773 | https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L159-L177 | train |
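A usage sketch for load_pem(); the key path is hypothetical and the import path follows this record's module:

from requests_chef.mixlib_auth import RSAKey

# accepts either a PEM-formatted string or a path to a key file
key = RSAKey.load_pem('/home/me/.chef/client.pem')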
samstav/requests-chef | requests_chef/mixlib_auth.py | RSAKey.sign | def sign(self, data, b64=True):
"""Sign data with the private key and return the signed data.
The signed data will be Base64 encoded if b64 is True.
"""
padder = padding.PKCS1v15()
signer = self.private_key.signer(padder, None)
if not isinstance(data, six.binary_type):
data = data.encode('utf_8')
signer.update(data)
signed = signer.finalize()
if b64:
signed = base64.b64encode(signed)
return signed | python | def sign(self, data, b64=True):
"""Sign data with the private key and return the signed data.
The signed data will be Base64 encoded if b64 is True.
"""
padder = padding.PKCS1v15()
signer = self.private_key.signer(padder, None)
if not isinstance(data, six.binary_type):
data = data.encode('utf_8')
signer.update(data)
signed = signer.finalize()
if b64:
signed = base64.b64encode(signed)
return signed | [
"def",
"sign",
"(",
"self",
",",
"data",
",",
"b64",
"=",
"True",
")",
":",
"padder",
"=",
"padding",
".",
"PKCS1v15",
"(",
")",
"signer",
"=",
"self",
".",
"private_key",
".",
"signer",
"(",
"padder",
",",
"None",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"six",
".",
"binary_type",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"'utf_8'",
")",
"signer",
".",
"update",
"(",
"data",
")",
"signed",
"=",
"signer",
".",
"finalize",
"(",
")",
"if",
"b64",
":",
"signed",
"=",
"base64",
".",
"b64encode",
"(",
"signed",
")",
"return",
"signed"
] | Sign data with the private key and return the signed data.
The signed data will be Base64 encoded if b64 is True. | [
"Sign",
"data",
"with",
"the",
"private",
"key",
"and",
"return",
"the",
"signed",
"data",
"."
] | a0bf013b925abd0cf76eeaf6300cf32659632773 | https://github.com/samstav/requests-chef/blob/a0bf013b925abd0cf76eeaf6300cf32659632773/requests_chef/mixlib_auth.py#L179-L192 | train |
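A usage sketch for sign(), continuing from a loaded key; the key path is hypothetical:

from requests_chef.mixlib_auth import RSAKey

key = RSAKey.load_pem('/home/me/.chef/client.pem')
sig = key.sign('canonical request text')             # base64-encoded by default
raw = key.sign('canonical request text', b64=False)  # raw PKCS#1 v1.5 bytes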
heronotears/lazyxml | lazyxml/__init__.py | dump | def dump(obj, fp, **kw):
r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument has the same meaning as in :func:`dumps`
:param obj: data to dump to xml.
:param fp: a filename or a file or file-like object that supports ``.write()`` to write the xml content
.. versionchanged:: 1.2
The `fp` was a filename string before this. It can now be a file or file-like object that supports ``.write()`` to write the xml content.
"""
xml = dumps(obj, **kw)
if isinstance(fp, basestring):
with open(fp, 'w') as fobj:
fobj.write(xml)
else:
fp.write(xml) | python | def dump(obj, fp, **kw):
r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument has the same meaning as in :func:`dumps`
:param obj: data to dump to xml.
:param fp: a filename or a file or file-like object that supports ``.write()`` to write the xml content
.. versionchanged:: 1.2
The `fp` was a filename string before this. It can now be a file or file-like object that supports ``.write()`` to write the xml content.
"""
xml = dumps(obj, **kw)
if isinstance(fp, basestring):
with open(fp, 'w') as fobj:
fobj.write(xml)
else:
fp.write(xml) | [
"def",
"dump",
"(",
"obj",
",",
"fp",
",",
"*",
"*",
"kw",
")",
":",
"xml",
"=",
"dumps",
"(",
"obj",
",",
"*",
"*",
"kw",
")",
"if",
"isinstance",
"(",
"fp",
",",
"basestring",
")",
":",
"with",
"open",
"(",
"fp",
",",
"'w'",
")",
"as",
"fobj",
":",
"fobj",
".",
"write",
"(",
"xml",
")",
"else",
":",
"fp",
".",
"write",
"(",
"xml",
")"
] | r"""Dump python object to file.
>>> import lazyxml
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> lazyxml.dump(data, 'dump.xml')
>>> with open('dump-fp.xml', 'w') as fp:
>>> lazyxml.dump(data, fp)
>>> from cStringIO import StringIO
>>> data = {'demo': {'foo': 1, 'bar': 2}}
>>> buffer = StringIO()
>>> lazyxml.dump(data, buffer)
>>> buffer.getvalue()
<?xml version="1.0" encoding="utf-8"?><demo><foo><![CDATA[1]]></foo><bar><![CDATA[2]]></bar></demo>
>>> buffer.close()
.. note::
``kw`` argument has the same meaning as in :func:`dumps`
:param obj: data to dump to xml.
:param fp: a filename or a file or file-like object that supports ``.write()`` to write the xml content
.. versionchanged:: 1.2
The `fp` was a filename string before this. It can now be a file or file-like object that supports ``.write()`` to write the xml content. | [
"r",
"Dump",
"python",
"object",
"to",
"file",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/__init__.py#L149-L180 | train |
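Both call forms from the docstring, condensed into one runnable sketch (Python 2, matching the basestring check above):

import lazyxml

data = {'demo': {'foo': 1, 'bar': 2}}
lazyxml.dump(data, 'dump.xml')        # filename form: opened and written internally
with open('dump-fp.xml', 'w') as fp:
    lazyxml.dump(data, fp)            # file-like form: .write() is called directly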