Dataset schema, one record per function:
repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | language (1 class: python) | original_string (string, 75-19.8k chars; the function source, docstring included) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 class: train)
tamasgal/km3pipe | km3pipe/plot.py | joint_hex | python
def joint_hex(x, y, **kwargs):
    """Seaborn Joint Hexplot with marginal KDE + hists."""
    return sns.jointplot(
        x, y, kind='hex', stat_func=None, marginal_kws={'kde': True}, **kwargs
    )
7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/plot.py#L111-L115 | train
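A usage sketch for the record above, with hypothetical data; it assumes km3pipe is importable and a seaborn release that still accepts positional x/y and the `stat_func` keyword (deprecated and later removed in newer seaborn):

import numpy as np
from km3pipe.plot import joint_hex  # assumption: km3pipe installed

rng = np.random.RandomState(42)
x = rng.normal(size=1000)
y = 0.5 * x + rng.normal(scale=0.5, size=1000)
grid = joint_hex(x, y)         # a seaborn JointGrid: hex joint plot with KDE/hist margins
grid.savefig("joint_hex.png")  # hypothetical output filename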
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.execute | python
def execute(self, time_interval):
    """
    Here we execute the factors over the streams in the workflow.
    Execute the factors in reverse order. We can't just execute the last factor because there may be multiple
    "leaf" factors that aren't triggered by upstream computations.
    :param time_interval: The time interval to execute this workflow over
    """
    # TODO: What if the leaf nodes have different time intervals?
    # if not self._hyperstream:
    #     raise ValueError("")
    with WorkflowMonitor(self):
        # First look for asset writers
        for factor in self.factors[::-1]:
            if factor.tool.name == "asset_writer":
                factor.execute(time_interval)
        # Then everything else: `and` binds tighter than `or`, so this reads
        # "sink is None, or (sink is a leaf and the tool is not an asset writer)"
        for factor in self.factors[::-1]:
            if factor.sink is None or factor.sink.is_leaf and factor.tool.name != "asset_writer":
                factor.execute(time_interval)
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L87-L107 | train
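A self-contained sketch of the two-pass, reverse-order dispatch pattern used by `execute` above; the stand-in class is hypothetical, not the HyperStream API:

class FakeFactor:
    def __init__(self, tool_name, is_leaf):
        self.tool_name = tool_name
        self.is_leaf = is_leaf
        self.ran = 0

    def execute(self, time_interval):
        self.ran += 1

factors = [FakeFactor("aggregate", True), FakeFactor("asset_writer", True)]
interval = "2016-01-01/2016-01-02"  # placeholder for a TimeInterval

# Pass 1: asset writers only, walking the factor list in reverse
for f in factors[::-1]:
    if f.tool_name == "asset_writer":
        f.execute(interval)

# Pass 2: the remaining leaf factors
for f in factors[::-1]:
    if f.is_leaf and f.tool_name != "asset_writer":
        f.execute(interval)

assert all(f.ran == 1 for f in factors)  # each factor ran exactly once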
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow._add_node | python
def _add_node(self, node):
    """
    Add a node to the workflow
    :param node: The node object
    :type node: Node
    :return: None
    """
    self.nodes[node.node_id] = node
    logging.info("Added node with id {} containing {} streams".format(node.node_id, len(node.streams)))
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L109-L118 | train
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow._add_factor | python
def _add_factor(self, factor):
    """
    Add a factor to the workflow
    :param factor: The factor object
    :type factor: Factor | MultiOutputFactor | NodeCreationFactor
    :return: None
    """
    self.factors.append(factor)
    logging.info("Added factor with tool {} ".format(factor.tool))
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L120-L129 | train
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.create_factor_general | python
def create_factor_general(self, *args, **kwargs):
    """
    General signature for factor creation that tries each of the factor creation types using duck typing
    :param args: The positional arguments
    :param kwargs: The named arguments
    :return: The created factor
    """
    try:
        return self.create_factor(*args, **kwargs)
    except TypeError:
        pass
    try:
        return self.create_multi_output_factor(*args, **kwargs)
    except TypeError:
        pass
    try:
        return self.create_node_creation_factor(*args, **kwargs)
    except TypeError:
        pass
    raise FactorDefinitionError("Could not find a matching signature")
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L164-L184 | train
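A runnable illustration of the try-each-signature duck-typing dispatch used above; the candidate functions are stand-ins, not HyperStream factory methods:

def _with_two(a, b):
    return ("two", a, b)

def _with_three(a, b, c):
    return ("three", a, b, c)

def dispatch(*args, **kwargs):
    for candidate in (_with_two, _with_three):
        try:
            return candidate(*args, **kwargs)
        except TypeError:
            # Signature mismatch; note that a TypeError raised *inside* a
            # matching candidate is swallowed too, a known cost of this pattern.
            pass
    raise ValueError("Could not find a matching signature")

print(dispatch(1, 2))     # ('two', 1, 2)
print(dispatch(1, 2, 3))  # ('three', 1, 2, 3)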
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.create_multi_output_factor | python
def create_multi_output_factor(self, tool, source, splitting_node, sink):
    """
    Creates a multi-output factor.
    This takes a single node, applies a MultiOutputTool to create multiple nodes on a new plate.
    Instantiates a single tool for all of the input plate values,
    and connects the source and sink nodes with that tool.
    Note that the tool parameters are currently fixed over a plate. For parameters that vary over a plate,
    an extra input stream should be used
    :param tool: The tool to use. This is either an instantiated Tool object or a dict with "name" and "parameters"
    :param source: The source node
    :param splitting_node: The node over which to split
    :param sink: The sink node
    :return: The factor object
    :type tool: MultiOutputTool | dict
    :type source: Node | None
    :type sink: Node
    :rtype: Factor
    """
    if source and not isinstance(source, Node):
        raise ValueError("Expected Node, got {}".format(type(source)))
    if not isinstance(sink, Node):
        raise ValueError("Expected Node, got {}".format(type(sink)))
    # if isinstance(tool, dict):
    #     tool = self.channels.get_tool(**tool)
    if not isinstance(tool, MultiOutputTool):
        raise ValueError("Expected MultiOutputTool, got {}".format(type(tool)))
    # Check that the input plates are compatible - note this is the opposite way round to a normal factor
    input_plates = source.plates if source else []
    output_plates = sink.plates
    if len(input_plates) > 1:
        raise NotImplementedError
    if len(output_plates) == 0:
        raise ValueError("No output plate found")
    if len(output_plates) == 1:
        if not self.check_multi_output_plate_compatibility(input_plates, output_plates[0]):
            raise IncompatiblePlatesError("Parent plate does not match input plate")
        factor = MultiOutputFactor(tool=tool, source_node=source, splitting_node=splitting_node, sink_node=sink,
                                   input_plate=input_plates[0] if input_plates else None,
                                   output_plates=output_plates[0])
    else:
        # The output plates should be the same as the input plates, except for one
        # additional plate. Since we're currently only supporting one input plate,
        # we can safely assume that there is a single matching plate.
        # Finally, note that the output plate must either have no parents
        # (i.e. it is at the root of the tree), or the parent plate is somewhere
        # in the input plate's ancestry
        if len(output_plates) > 2:
            raise NotImplementedError
        if len(input_plates) != 1:
            raise IncompatiblePlatesError("Require an input plate to match all but one of the output plates")
        if output_plates[0] == input_plates[0]:
            # Found a match, so the output plate should be the other plate
            output_plate = output_plates[1]
        else:
            if output_plates[1].plate_id != input_plates[0].plate_id:
                raise IncompatiblePlatesError("Require an input plate to match all but one of the output plates")
            output_plate = output_plates[0]
            # Swap them round so the new plate is the last plate - this is required by the factor
            output_plates[1], output_plates[0] = output_plates[0], output_plates[1]
        if not output_plate.is_root:
            # We need to walk up the input plate's parent tree
            match = False
            parent = input_plates[0].parent
            while parent is not None:
                if parent.plate_id == output_plate.parent.plate_id:
                    match = True
                    break
                parent = parent.parent
            if not match:
                raise IncompatiblePlatesError("Require an input plate to match all but one of the output plates")
        factor = MultiOutputFactor(
            tool=tool, source_node=source, sink_node=sink,
            splitting_node=splitting_node, input_plate=input_plates[0], output_plates=output_plates)
    self._add_factor(factor)
    return factor
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L287-L374 | train
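The ancestry check near the end of `create_multi_output_factor` walks a linked chain of parent plates; a minimal standalone model of that walk, with a hypothetical `Plate` stand-in:

class Plate:
    def __init__(self, plate_id, parent=None):
        self.plate_id = plate_id
        self.parent = parent

def shares_ancestor(input_plate, output_plate):
    # True if output_plate's parent appears anywhere in input_plate's ancestry
    parent = input_plate.parent
    while parent is not None:
        if parent.plate_id == output_plate.parent.plate_id:
            return True
        parent = parent.parent
    return False

root = Plate("house")
mid = Plate("house.resident", parent=root)
leaf = Plate("house.resident.day", parent=mid)

print(shares_ancestor(leaf, Plate("house.resident.kind", parent=mid)))   # True
print(shares_ancestor(leaf, Plate("other", parent=Plate("elsewhere"))))  # False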
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.create_node_creation_factor | python
def create_node_creation_factor(self, tool, source, output_plate, plate_manager):
    """
    Creates a factor that itself creates an output node, and ensures that the plate for the output node exists
    along with all relevant meta-data
    :param tool: The tool
    :param source: The source node
    :param output_plate: The details of the plate that will be created (dict)
    :param plate_manager: The hyperstream plate manager
    :type output_plate: dict
    :type plate_manager: PlateManager
    :return: The created factor
    """
    # if isinstance(tool, dict):
    #     tool = self.channels.get_tool(**tool)
    if not isinstance(tool, PlateCreationTool):
        raise ValueError("Expected PlateCreationTool, got {}".format(type(tool)))
    input_plates = source.plates if source else []
    if len(input_plates) > 1:
        raise NotImplementedError
    factor = NodeCreationFactor(
        tool=tool,
        source_node=source,
        input_plate=input_plates[0] if input_plates else None,
        output_plate=output_plate,
        plate_manager=plate_manager
    )
    self._add_factor(factor)
    return factor
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L376-L409 | train
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.check_plate_compatibility | python
def check_plate_compatibility(tool, source_plate, sink_plate):
    """
    Checks whether the source and sink plate are compatible given the tool
    :param tool: The tool
    :param source_plate: The source plate
    :param sink_plate: The sink plate
    :return: Either an error, or None
    :type tool: Tool
    :type source_plate: Plate
    :type sink_plate: Plate
    :rtype: None | str
    """
    if sink_plate == source_plate.parent:
        return None
    # could be that they have the same meta data, but the sink plate is a simplification of the source
    # plate (e.g. when using IndexOf tool)
    if sink_plate.meta_data_id == source_plate.meta_data_id:
        if sink_plate.is_sub_plate(source_plate):
            return None
        return "Sink plate {} is not a simplification of source plate {}".format(
            sink_plate.plate_id, source_plate.plate_id)
    # Also check to see if the meta data differs by only one value
    meta_data_diff = set(source_plate.ancestor_meta_data_ids) - set(sink_plate.ancestor_meta_data_ids)
    if len(meta_data_diff) == 1:
        # Is the diff value the same as the aggregation meta id passed to the aggregate tool
        if tool.aggregation_meta_data not in meta_data_diff:
            return "Aggregate tool meta data ({}) " \
                   "does not match the diff between source and sink plates ({})".format(
                       tool.aggregation_meta_data, list(meta_data_diff)[0])
    else:
        return "{} not in source's parent plates".format(sink_plate.plate_id)
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L412-L445 | train
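The "differs by only one value" test above is a plain set difference over ancestor meta-data ids; a standalone illustration with hypothetical ids:

source_ids = {"house", "resident", "day"}
sink_ids = {"house", "resident"}

diff = source_ids - sink_ids   # {'day'}
aggregation_meta_data = "day"  # hypothetical value carried by an aggregate tool

if len(diff) == 1 and aggregation_meta_data in diff:
    print("compatible: aggregating over", diff.pop())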
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.check_multi_output_plate_compatibility | python
def check_multi_output_plate_compatibility(source_plates, sink_plate):
    """
    Check multi-output plate compatibility. This ensures that the source plates and sink plates match for a
    multi-output plate
    :param source_plates: The source plates
    :param sink_plate: The sink plate
    :return: True if the plates are compatible
    """
    if len(source_plates) == 0:
        if sink_plate.parent is not None:
            return False
    else:
        if sink_plate.parent is None:
            return False
        else:
            if sink_plate.parent.plate_id != source_plates[0].plate_id:
                return False
    return True
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L448-L466 | train
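A quick standalone check of the three cases handled above, using a hypothetical plate stand-in; the helper is a condensed but logically equivalent form of the method:

from collections import namedtuple

Plate = namedtuple("Plate", ["plate_id", "parent"])

def compatible(source_plates, sink_plate):
    if not source_plates:
        return sink_plate.parent is None
    return (sink_plate.parent is not None
            and sink_plate.parent.plate_id == source_plates[0].plate_id)

root = Plate("house", None)
child = Plate("house.resident", root)

print(compatible([], Plate("top", None)))     # True: no sources, sink at the root
print(compatible([], child))                  # False: no sources but sink has a parent
print(compatible([root], child))              # True: sink's parent is the source plate
print(compatible([child], Plate("x", None)))  # False: sink has no parent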
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.to_dict | python
def to_dict(self, tool_long_names=True):
    """
    Get a representation of the workflow as a dictionary for display purposes
    :param tool_long_names: Indicates whether to use long names, such as
    SplitterFromStream(element=None, use_mapping_keys_only=True)
    or short names, such as
    splitter_from_stream
    :type tool_long_names: bool
    :return: The dictionary of nodes, factors and plates
    """
    d = dict(nodes=[], factors=[], plates=defaultdict(list))
    for node in self.nodes:
        node_id = self.nodes[node].node_id
        d['nodes'].append({'id': node_id})
        for plate_id in self.nodes[node].plate_ids:
            d['plates'][plate_id].append({'id': node_id, 'type': 'node'})
    for factor in self.factors:
        tool = str(factor.tool) if tool_long_names else factor.tool.name
        try:
            sources = [s.node_id for s in factor.sources]
        except AttributeError:
            if factor.source:
                sources = [factor.source.node_id]
            else:
                sources = []
        d['factors'].append({
            'id': tool,
            'sources': sources,
            'sink': factor.sink.node_id})
        try:
            if factor.plates:
                for plate in factor.plates:
                    d['plates'][plate.plate_id].append({'id': tool, 'type': 'factor'})
            else:
                d['plates']['root'].append({'id': tool, 'type': 'factor'})
        except AttributeError:
            pass
    d['plates'] = dict(d['plates'])
    return d
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L510-L553 | train
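The grouping above leans on `collections.defaultdict(list)`, converted back to a plain dict at the end so the result prints and serialises normally; a minimal standalone version of that pattern:

from collections import defaultdict

d = {'nodes': [], 'plates': defaultdict(list)}
for node_id, plate_ids in [('n1', ['p1']), ('n2', ['p1', 'p2'])]:
    d['nodes'].append({'id': node_id})
    for plate_id in plate_ids:
        d['plates'][plate_id].append({'id': node_id, 'type': 'node'})

d['plates'] = dict(d['plates'])  # drop the defaultdict wrapper
print(d['plates'])  # 'p1' maps to two entries, 'p2' to one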
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow.py | Workflow.to_json | python
def to_json(self, formatter=None, tool_long_names=True, **kwargs):
    """
    Get a JSON representation of the workflow
    :param tool_long_names: Indicates whether to use long names, such as
    SplitterFromStream(element=None, use_mapping_keys_only=True)
    or short names, such as
    splitter_from_stream
    :param formatter: The formatting function
    :param kwargs: Keyword arguments for the json output
    :return: A JSON string
    """
    d = self.to_dict(tool_long_names=tool_long_names)
    if formatter:
        d = formatter(d)
    return json.dumps(d, **kwargs)
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L555-L570 | train
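The optional `formatter` is just a dict-to-dict hook applied before `json.dumps`; a standalone sketch with a hypothetical formatter:

import json

def to_json(d, formatter=None, **kwargs):
    if formatter:
        d = formatter(d)
    return json.dumps(d, **kwargs)

def drop_empty(d):
    # Hypothetical formatter: remove keys whose values are empty
    return {k: v for k, v in d.items() if v}

print(to_json({'nodes': [], 'factors': [{'id': 'f'}]}, formatter=drop_empty, indent=2))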
IRC-SPHERE/HyperStream | hyperstream/tool/base_tool.py | BaseTool.parameters_dict | python
def parameters_dict(self):
    """
    Get the tool parameters as a simple dictionary
    :return: The tool parameters
    """
    d = {}
    for k, v in self.__dict__.items():
        if not k.startswith("_"):
            d[k] = v
    return d
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L85-L95 | train
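The loop above is equivalent to one dict comprehension over the instance's public attributes; a standalone check with a hypothetical tool class:

class DemoTool:
    def __init__(self):
        self.window = 60   # public parameter, included
        self._cache = {}   # underscore-prefixed, excluded

    def parameters_dict(self):
        return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}

print(DemoTool().parameters_dict())  # {'window': 60}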
IRC-SPHERE/HyperStream | hyperstream/tool/base_tool.py | BaseTool.parameters | python
def parameters(self):
    """
    Get the tool parameters
    :return: The tool parameters along with additional information (whether they are functions or sets)
    """
    parameters = []
    for k, v in self.__dict__.items():
        if k.startswith("_"):
            continue
        is_function = False
        is_set = False
        if callable(v):
            value = pickle.dumps(func_dump(v))
            is_function = True
        elif isinstance(v, set):
            value = list(v)
            is_set = True
        else:
            value = v
        parameters.append(dict(
            key=k,
            value=value,
            is_function=is_function,
            is_set=is_set
        ))
    return parameters
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L98-L128 | train
IRC-SPHERE/HyperStream | hyperstream/tool/base_tool.py | BaseTool.parameters_from_model | python
def parameters_from_model(parameters_model):
    """
    Get the tool parameters model from dictionaries
    :param parameters_model: The parameters as a mongoengine model
    :return: The tool parameters as a dictionary
    """
    parameters = {}
    for p in parameters_model:
        if p.is_function:
            code, defaults, closure = pickle.loads(p.value)
            parameters[p.key] = func_load(code, defaults, closure, globs=globals())
        elif p.is_set:
            parameters[p.key] = set(p.value)
        else:
            parameters[p.key] = p.value
    return parameters
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L131-L147 | train
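`parameters` and `parameters_from_model` round-trip values through a tagged form: callables via `func_dump`/`func_load` (marshalled code objects) and sets via lists. A standalone sketch of the callable case using `marshal` directly, ignoring the defaults and closure that `func_dump` also carries; these names are stand-ins, not HyperStream's helpers:

import marshal
import pickle
import types

def scale(x):
    return 2 * x

# Serialise: tag the value with how it was encoded
entry = {
    "key": "transform",
    "value": pickle.dumps(marshal.dumps(scale.__code__)),
    "is_function": True,
    "is_set": False,
}

# Deserialise: rebuild the function from its marshalled code object
code = marshal.loads(pickle.loads(entry["value"]))
rebuilt = types.FunctionType(code, globals(), "scale")
print(rebuilt(21))  # 42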
IRC-SPHERE/HyperStream | hyperstream/tool/base_tool.py | BaseTool.get_model | python
def get_model(self):
    """
    Gets the mongoengine model for this tool, which serializes parameters that are functions
    :return: The mongoengine model. TODO: Note that the tool version is currently incorrect (0.0.0)
    """
    return ToolModel(
        name=self.name,
        version="0.0.0",
        parameters=self.parameters_from_dicts(self.parameters)
    )
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L159-L170 | train
IRC-SPHERE/HyperStream | hyperstream/tool/base_tool.py | BaseTool.write_to_history | python
def write_to_history(**kwargs):
    """
    Write to the history of executions of this tool
    :param kwargs: keyword arguments describing the executions
    :return: None
    """
    from hyperstream import HyperStream
    hs = HyperStream(loglevel=logging.CRITICAL, file_logger=False, console_logger=False, mqtt_logger=None)
    if hs.current_session:
        hs.current_session.write_to_history(**kwargs)
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L173-L183 | train
tamasgal/km3pipe | km3modules/plot.py | plot_dom_parameters | python
def plot_dom_parameters(
        data,
        detector,
        filename,
        label,
        title,
        vmin=0.0,
        vmax=10.0,
        cmap='RdYlGn_r',
        under='deepskyblue',
        over='deeppink',
        underfactor=1.0,
        overfactor=1.0,
        missing='lightgray',
        hide_limits=False
):
    """Creates a plot in the classical monitoring.km3net.de style.

    Parameters
    ----------
    data: dict((du, floor) -> value)
    detector: km3pipe.hardware.Detector() instance
    filename: filename or filepath
    label: str
    title: str
    underfactor: a scale factor for the points used for underflow values
    overfactor: a scale factor for the points used for overflow values
    hide_limits: do not show under/overflows in the plot

    """
    x, y, _ = zip(*detector.doms.values())
    fig, ax = plt.subplots(figsize=(10, 6))
    cmap = plt.get_cmap(cmap)
    cmap.set_over(over, 1.0)
    cmap.set_under(under, 1.0)

    m_size = 100
    scatter_args = {
        'edgecolors': 'None',
        'vmin': vmin,
        'vmax': vmax,
    }
    sc_inactive = ax.scatter(
        x, y, c=missing, label='missing', s=m_size * 0.9, **scatter_args
    )

    xa, ya = map(np.array, zip(*data.keys()))
    zs = np.array(list(data.values()))
    in_range_idx = np.logical_and(zs >= vmin, zs <= vmax)
    sc = ax.scatter(
        xa[in_range_idx],
        ya[in_range_idx],
        c=zs[in_range_idx],
        cmap=cmap,
        s=m_size,
        **scatter_args
    )
    if not hide_limits:
        under_idx = zs < vmin
        ax.scatter(
            xa[under_idx],
            ya[under_idx],
            c=under,
            label='< {0}'.format(vmin),
            s=m_size * underfactor,
            **scatter_args
        )
        over_idx = zs > vmax
        ax.scatter(
            xa[over_idx],
            ya[over_idx],
            c=over,
            label='> {0}'.format(vmax),
            s=m_size * overfactor,
            **scatter_args
        )

    cb = plt.colorbar(sc)
    cb.set_label(label)

    ax.set_title(
        "{0}\n{1} UTC".format(title, datetime.utcnow().strftime("%c"))
    )
    ax.set_xlabel("DU")
    ax.set_ylabel("DOM")
    ax.set_ylim(-2)
    ax.set_yticks(range(1, 18 + 1))
    major_locator = pylab.MaxNLocator(integer=True)
    sc_inactive.axes.xaxis.set_major_locator(major_locator)
    ax.legend(
        bbox_to_anchor=(0., -.16, 1., .102),
        loc=1,
        ncol=2,
        mode="expand",
        borderaxespad=0.
    )
    fig.tight_layout()
    plt.savefig(filename, dpi=120, bbox_inches="tight")
    plt.close('all')
7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/plot.py#L27-L129 | train
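The under/over handling above combines a colormap's out-of-range colors with the `vmin`/`vmax` passed to `scatter`; a self-contained demonstration of just that mechanism (synthetic values, no detector required; `Colormap.copy()` needs Matplotlib 3.4+):

import matplotlib
matplotlib.use("Agg")  # headless backend for scripted plotting
import matplotlib.pyplot as plt
import numpy as np

values = np.array([-1.0, 2.0, 5.0, 8.0, 12.0])
x = np.arange(len(values))

cmap = plt.get_cmap('RdYlGn_r').copy()  # copy before mutating the shared colormap
cmap.set_under('deepskyblue')           # drawn where value < vmin
cmap.set_over('deeppink')               # drawn where value > vmax

fig, ax = plt.subplots()
sc = ax.scatter(x, x, c=values, cmap=cmap, vmin=0.0, vmax=10.0, s=100)
fig.colorbar(sc, extend='both')         # the arrow ends show the under/over colors
fig.savefig("dom_parameters_demo.png", dpi=120)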
tamasgal/km3pipe | km3modules/plot.py | make_dom_map | def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):
"""Create a mollweide projection of a DOM with given PMTs.
The output can be used to call the `healpy.mollview` function.
"""
import healpy as hp
discs = [hp.query_disc(nside, dir, d) for dir in pmt_directions]
npix = hp.nside2npix(nside)
pixels = np.zeros(npix)
for disc, value in zip(discs, values):
for d in disc:
pixels[d] = value
if smoothing > 0:
return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)
return pixels | python | def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):
"""Create a mollweide projection of a DOM with given PMTs.
The output can be used to call the `healpy.mollview` function.
"""
import healpy as hp
discs = [hp.query_disc(nside, dir, d) for dir in pmt_directions]
npix = hp.nside2npix(nside)
pixels = np.zeros(npix)
for disc, value in zip(discs, values):
for d in disc:
pixels[d] = value
if smoothing > 0:
return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)
return pixels | [
"def",
"make_dom_map",
"(",
"pmt_directions",
",",
"values",
",",
"nside",
"=",
"512",
",",
"d",
"=",
"0.2",
",",
"smoothing",
"=",
"0.1",
")",
":",
"import",
"healpy",
"as",
"hp",
"discs",
"=",
"[",
"hp",
".",
"query_disc",
"(",
"nside",
",",
"dir",
",",
"0.2",
")",
"for",
"dir",
"in",
"pmt_directions",
"]",
"npix",
"=",
"hp",
".",
"nside2npix",
"(",
"nside",
")",
"pixels",
"=",
"np",
".",
"zeros",
"(",
"npix",
")",
"for",
"disc",
",",
"value",
"in",
"zip",
"(",
"discs",
",",
"values",
")",
":",
"for",
"d",
"in",
"disc",
":",
"pixels",
"[",
"d",
"]",
"=",
"value",
"if",
"smoothing",
">",
"0",
":",
"return",
"hp",
".",
"sphtfunc",
".",
"smoothing",
"(",
"pixels",
",",
"fwhm",
"=",
"smoothing",
",",
"iter",
"=",
"1",
")",
"return",
"pixels"
] | Create a mollweide projection of a DOM with given PMTs.
The output can be used to call the `healpy.mollview` function. | [
"Create",
"a",
"mollweide",
"projection",
"of",
"a",
"DOM",
"with",
"given",
"PMTs",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/plot.py#L132-L146 | train |
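A minimal usage sketch for make_dom_map; the PMT directions and values below are invented:

import numpy as np
import healpy as hp

pmt_directions = [np.array([0.0, 0.0, 1.0]), np.array([0.0, 1.0, 0.0])]   # unit vectors, one per PMT
values = [3.5, 7.2]                                                        # e.g. PMT rates
pixels = make_dom_map(pmt_directions, values, nside=64, smoothing=0)       # smoothing disabled here
hp.mollview(pixels, title="DOM map")    # as the docstring suggests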
IRC-SPHERE/HyperStream | hyperstream/stream/stream.py | Stream.calculated_intervals | def calculated_intervals(self, value):
"""
Set the calculated intervals
This will be written to the stream_status collection if it's in the database channel
:param value: The calculated intervals
:type value: TimeIntervals, TimeInterval, list[TimeInterval]
"""
if not value:
self._calculated_intervals = TimeIntervals()
return
if isinstance(value, TimeInterval):
value = TimeIntervals([value])
elif isinstance(value, TimeIntervals):
pass
elif isinstance(value, list):
value = TimeIntervals(value)
else:
raise TypeError("Expected list/TimeInterval/TimeIntervals, got {}".format(type(value)))
for interval in value:
if interval.end > utcnow():
raise ValueError("Calculated intervals should not be in the future")
self._calculated_intervals = value | python | def calculated_intervals(self, value):
"""
Set the calculated intervals
This will be written to the stream_status collection if it's in the database channel
:param value: The calculated intervals
:type value: TimeIntervals, TimeInterval, list[TimeInterval]
"""
if not value:
self._calculated_intervals = TimeIntervals()
return
if isinstance(value, TimeInterval):
value = TimeIntervals([value])
elif isinstance(value, TimeIntervals):
pass
elif isinstance(value, list):
value = TimeIntervals(value)
else:
raise TypeError("Expected list/TimeInterval/TimeIntervals, got {}".format(type(value)))
for interval in value:
if interval.end > utcnow():
raise ValueError("Calculated intervals should not be in the future")
self._calculated_intervals = value | [
"def",
"calculated_intervals",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"value",
":",
"self",
".",
"_calculated_intervals",
"=",
"TimeIntervals",
"(",
")",
"return",
"if",
"isinstance",
"(",
"value",
",",
"TimeInterval",
")",
":",
"value",
"=",
"TimeIntervals",
"(",
"[",
"value",
"]",
")",
"elif",
"isinstance",
"(",
"value",
",",
"TimeIntervals",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"TimeIntervals",
"(",
"value",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected list/TimeInterval/TimeIntervals, got {}\"",
".",
"format",
"(",
"type",
"(",
"value",
")",
")",
")",
"for",
"interval",
"in",
"value",
":",
"if",
"interval",
".",
"end",
">",
"utcnow",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Calculated intervals should not be in the future\"",
")",
"self",
".",
"_calculated_intervals",
"=",
"value"
] | Set the calculated intervals
This will be written to the stream_status collection if it's in the database channel
:param value: The calculated intervals
:type value: TimeIntervals, TimeInterval, list[TimeInterval] | [
"Set",
"the",
"calculated",
"intervals",
"This",
"will",
"be",
"written",
"to",
"the",
"stream_status",
"collection",
"if",
"it",
"s",
"in",
"the",
"database",
"channel"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L115-L140 | train |
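A short sketch of the input types the setter accepts; the import paths for TimeInterval and utcnow are assumptions:

from datetime import timedelta
from hyperstream import TimeInterval          # assumed import path
from hyperstream.utils import utcnow          # assumed location of the utcnow helper

end = utcnow() - timedelta(minutes=1)               # intervals must not reach into the future
ti = TimeInterval(end - timedelta(hours=1), end)
stream.calculated_intervals = ti                    # a single interval is wrapped
stream.calculated_intervals = [ti]                  # a list is converted to TimeIntervals
stream.calculated_intervals = None                  # falsy input resets to an empty TimeIntervals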
IRC-SPHERE/HyperStream | hyperstream/stream/stream.py | Stream.purge | def purge(self):
"""
Purge the stream. This removes all data and clears the calculated intervals
:return: None
"""
self.channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None) | python | def purge(self):
"""
Purge the stream. This removes all data and clears the calculated intervals
:return: None
"""
self.channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None) | [
"def",
"purge",
"(",
"self",
")",
":",
"self",
".",
"channel",
".",
"purge_stream",
"(",
"self",
".",
"stream_id",
",",
"remove_definition",
"=",
"False",
",",
"sandbox",
"=",
"None",
")"
] | Purge the stream. This removes all data and clears the calculated intervals
:return: None | [
"Purge",
"the",
"stream",
".",
"This",
"removes",
"all",
"data",
"and",
"clears",
"the",
"calculated",
"intervals"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L142-L148 | train |
IRC-SPHERE/HyperStream | hyperstream/stream/stream.py | Stream.window | def window(self, time_interval=None, force_calculation=False):
"""
Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object
"""
if not time_interval:
if self.calculated_intervals:
time_interval = self.calculated_intervals[-1]
else:
raise ValueError("No calculations have been performed and no time interval was provided")
elif isinstance(time_interval, TimeInterval):
time_interval = TimeInterval(time_interval.start, time_interval.end)
elif isinstance(time_interval, Iterable):
time_interval = parse_time_tuple(*time_interval)
if isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
elif isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
else:
raise TypeError("Expected TimeInterval or (start, end) tuple of type str or datetime, got {}"
.format(type(time_interval)))
return StreamView(stream=self, time_interval=time_interval, force_calculation=force_calculation) | python | def window(self, time_interval=None, force_calculation=False):
"""
Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object
"""
if not time_interval:
if self.calculated_intervals:
time_interval = self.calculated_intervals[-1]
else:
raise ValueError("No calculations have been performed and no time interval was provided")
elif isinstance(time_interval, TimeInterval):
time_interval = TimeInterval(time_interval.start, time_interval.end)
elif isinstance(time_interval, Iterable):
time_interval = parse_time_tuple(*time_interval)
if isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
elif isinstance(time_interval, RelativeTimeInterval):
raise NotImplementedError
else:
raise TypeError("Expected TimeInterval or (start, end) tuple of type str or datetime, got {}"
.format(type(time_interval)))
return StreamView(stream=self, time_interval=time_interval, force_calculation=force_calculation) | [
"def",
"window",
"(",
"self",
",",
"time_interval",
"=",
"None",
",",
"force_calculation",
"=",
"False",
")",
":",
"if",
"not",
"time_interval",
":",
"if",
"self",
".",
"calculated_intervals",
":",
"time_interval",
"=",
"self",
".",
"calculated_intervals",
"[",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"No calculations have been performed and no time interval was provided\"",
")",
"elif",
"isinstance",
"(",
"time_interval",
",",
"TimeInterval",
")",
":",
"time_interval",
"=",
"TimeInterval",
"(",
"time_interval",
".",
"start",
",",
"time_interval",
".",
"end",
")",
"elif",
"isinstance",
"(",
"time_interval",
",",
"Iterable",
")",
":",
"time_interval",
"=",
"parse_time_tuple",
"(",
"*",
"time_interval",
")",
"if",
"isinstance",
"(",
"time_interval",
",",
"RelativeTimeInterval",
")",
":",
"raise",
"NotImplementedError",
"elif",
"isinstance",
"(",
"time_interval",
",",
"RelativeTimeInterval",
")",
":",
"raise",
"NotImplementedError",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected TimeInterval or (start, end) tuple of type str or datetime, got {}\"",
".",
"format",
"(",
"type",
"(",
"time_interval",
")",
")",
")",
"return",
"StreamView",
"(",
"stream",
"=",
"self",
",",
"time_interval",
"=",
"time_interval",
",",
"force_calculation",
"=",
"force_calculation",
")"
] | Gets a view on this stream for the time interval given
:param time_interval: either a TimeInterval object or (start, end) tuple of type str or datetime
:param force_calculation: Whether we should force calculation for this stream view if data does not exist
:type time_interval: None | Iterable | TimeInterval
:type force_calculation: bool
:return: a stream view object | [
"Gets",
"a",
"view",
"on",
"this",
"stream",
"for",
"the",
"time",
"interval",
"given"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L154-L180 | train |
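The three calling styles, sketched; stream is an existing Stream instance and the constructor arguments are placeholders:

from datetime import datetime

ti = TimeInterval(datetime(2016, 1, 1), datetime(2016, 1, 2))
view = stream.window(ti)                                # explicit TimeInterval
view = stream.window(("2016-01-01", "2016-01-02"))      # (start, end) tuple, handled by parse_time_tuple
view = stream.window()                                  # falls back to the last calculated interval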
IRC-SPHERE/HyperStream | hyperstream/stream/stream.py | DatabaseStream.load | def load(self):
"""
Load the stream definition from the database
:return: None
"""
with switch_db(StreamDefinitionModel, 'hyperstream'):
self.mongo_model = StreamDefinitionModel.objects.get(__raw__=self.stream_id.as_raw())
self._calculated_intervals = self.mongo_model.get_calculated_intervals() | python | def load(self):
"""
Load the stream definition from the database
:return: None
"""
with switch_db(StreamDefinitionModel, 'hyperstream'):
self.mongo_model = StreamDefinitionModel.objects.get(__raw__=self.stream_id.as_raw())
self._calculated_intervals = self.mongo_model.get_calculated_intervals() | [
"def",
"load",
"(",
"self",
")",
":",
"with",
"switch_db",
"(",
"StreamDefinitionModel",
",",
"'hyperstream'",
")",
":",
"self",
".",
"mongo_model",
"=",
"StreamDefinitionModel",
".",
"objects",
".",
"get",
"(",
"__raw__",
"=",
"self",
".",
"stream_id",
".",
"as_raw",
"(",
")",
")",
"self",
".",
"_calculated_intervals",
"=",
"self",
".",
"mongo_model",
".",
"get_calculated_intervals",
"(",
")"
] | Load the stream definition from the database
:return: None | [
"Load",
"the",
"stream",
"definition",
"from",
"the",
"database"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L212-L220 | train |
IRC-SPHERE/HyperStream | hyperstream/stream/stream.py | DatabaseStream.calculated_intervals | def calculated_intervals(self):
"""
Gets the calculated intervals from the database
:return: The calculated intervals
"""
if self._calculated_intervals is None:
logging.debug("get calculated intervals")
self.load()
return self.mongo_model.get_calculated_intervals()
return self._calculated_intervals | python | def calculated_intervals(self):
"""
Gets the calculated intervals from the database
:return: The calculated intervals
"""
if self._calculated_intervals is None:
logging.debug("get calculated intervals")
self.load()
return self.mongo_model.get_calculated_intervals()
return self._calculated_intervals | [
"def",
"calculated_intervals",
"(",
"self",
")",
":",
"if",
"self",
".",
"_calculated_intervals",
"is",
"None",
":",
"logging",
".",
"debug",
"(",
"\"get calculated intervals\"",
")",
"self",
".",
"load",
"(",
")",
"return",
"self",
".",
"mongo_model",
".",
"get_calculated_intervals",
"(",
")",
"return",
"self",
".",
"_calculated_intervals"
] | Gets the calculated intervals from the database
:return: The calculated intervals | [
"Gets",
"the",
"calculated",
"intervals",
"from",
"the",
"database"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L233-L243 | train |
tamasgal/km3pipe | km3pipe/io/__init__.py | GenericPump | def GenericPump(filenames, use_jppy=False, name="GenericPump", **kwargs):
"""A generic pump which utilises the appropriate pump."""
if isinstance(filenames, str):
filenames = [filenames]
try:
iter(filenames)
except TypeError:
log.critical("Don't know how to iterate through filenames.")
raise TypeError("Invalid filenames.")
extensions = set(os.path.splitext(fn)[1] for fn in filenames)
if len(extensions) > 1:
log.critical("Mixed filetypes, please use only files of the same type")
raise IOError("Mixed filetypes.")
extension = list(extensions)[0]
io = {
'.evt': EvtPump,
'.h5': HDF5Pump,
'.root': EventPump if use_jppy else AanetPump,
'.dat': DAQPump,
'.dqd': CLBPump,
}
if extension not in io:
log.critical(
"No pump found for file extension '{0}'".format(extension)
)
raise ValueError("Unknown filetype")
missing_files = [fn for fn in filenames if not os.path.exists(fn)]
if missing_files:
if len(missing_files) == len(filenames):
message = "None of the given files could be found."
log.critical(message)
raise SystemExit(message)
else:
log.warning(
"The following files are missing and ignored: {}".format(
', '.join(missing_files)
)
)
input_files = set(filenames) - set(missing_files)
if len(input_files) == 1:
return io[extension](filename=list(input_files)[0], name=name, **kwargs)
else:
return io[extension](filenames=filenames, name=name, **kwargs) | python | def GenericPump(filenames, use_jppy=False, name="GenericPump", **kwargs):
"""A generic pump which utilises the appropriate pump."""
if isinstance(filenames, str):
filenames = [filenames]
try:
iter(filenames)
except TypeError:
log.critical("Don't know how to iterate through filenames.")
raise TypeError("Invalid filenames.")
extensions = set(os.path.splitext(fn)[1] for fn in filenames)
if len(extensions) > 1:
log.critical("Mixed filetypes, please use only files of the same type")
raise IOError("Mixed filetypes.")
extension = list(extensions)[0]
io = {
'.evt': EvtPump,
'.h5': HDF5Pump,
'.root': EventPump if use_jppy else AanetPump,
'.dat': DAQPump,
'.dqd': CLBPump,
}
if extension not in io:
log.critical(
"No pump found for file extension '{0}'".format(extension)
)
raise ValueError("Unknown filetype")
missing_files = [fn for fn in filenames if not os.path.exists(fn)]
if missing_files:
if len(missing_files) == len(filenames):
message = "None of the given files could be found."
log.critical(message)
raise SystemExit(message)
else:
log.warning(
"The following files are missing and ignored: {}".format(
', '.join(missing_files)
)
)
input_files = set(filenames) - set(missing_files)
if len(input_files) == 1:
return io[extension](filename=list(input_files)[0], name=name, **kwargs)
else:
return io[extension](filenames=filenames, name=name, **kwargs) | [
"def",
"GenericPump",
"(",
"filenames",
",",
"use_jppy",
"=",
"False",
",",
"name",
"=",
"\"GenericPump\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"filenames",
",",
"str",
")",
":",
"filenames",
"=",
"[",
"filenames",
"]",
"try",
":",
"iter",
"(",
"filenames",
")",
"except",
"TypeError",
":",
"log",
".",
"critical",
"(",
"\"Don't know how to iterate through filenames.\"",
")",
"raise",
"TypeError",
"(",
"\"Invalid filenames.\"",
")",
"extensions",
"=",
"set",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"[",
"1",
"]",
"for",
"fn",
"in",
"filenames",
")",
"if",
"len",
"(",
"extensions",
")",
">",
"1",
":",
"log",
".",
"critical",
"(",
"\"Mixed filetypes, please use only files of the same type\"",
")",
"raise",
"IOError",
"(",
"\"Mixed filetypes.\"",
")",
"extension",
"=",
"list",
"(",
"extensions",
")",
"[",
"0",
"]",
"io",
"=",
"{",
"'.evt'",
":",
"EvtPump",
",",
"'.h5'",
":",
"HDF5Pump",
",",
"'.root'",
":",
"EventPump",
"if",
"use_jppy",
"else",
"AanetPump",
",",
"'.dat'",
":",
"DAQPump",
",",
"'.dqd'",
":",
"CLBPump",
",",
"}",
"if",
"extension",
"not",
"in",
"io",
":",
"log",
".",
"critical",
"(",
"\"No pump found for file extension '{0}'\"",
".",
"format",
"(",
"extension",
")",
")",
"raise",
"ValueError",
"(",
"\"Unknown filetype\"",
")",
"missing_files",
"=",
"[",
"fn",
"for",
"fn",
"in",
"filenames",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
"]",
"if",
"missing_files",
":",
"if",
"len",
"(",
"missing_files",
")",
"==",
"len",
"(",
"filenames",
")",
":",
"message",
"=",
"\"None of the given files could be found.\"",
"log",
".",
"critical",
"(",
"message",
")",
"raise",
"SystemExit",
"(",
"message",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"The following files are missing and ignored: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"missing_files",
")",
")",
")",
"input_files",
"=",
"set",
"(",
"filenames",
")",
"-",
"set",
"(",
"missing_files",
")",
"if",
"len",
"(",
"input_files",
")",
"==",
"1",
":",
"return",
"io",
"[",
"extension",
"]",
"(",
"filename",
"=",
"filenames",
"[",
"0",
"]",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"io",
"[",
"extension",
"]",
"(",
"filenames",
"=",
"filenames",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")"
] | A generic pump which selects the appropriate pump based on the file extension. | [
"A",
"generic",
"pump",
"which",
"utilises",
"the",
"appropriate",
"pump",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/__init__.py#L36-L87 | train |
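A quick sketch of the extension-based dispatch; the file names are hypothetical:

pump = GenericPump("run00001.h5")                        # -> HDF5Pump
pump = GenericPump(["a.root", "b.root"], use_jppy=True)  # -> EventPump (AanetPump without use_jppy)
pump = GenericPump(["a.h5", "b.root"])                   # -> IOError: mixed filetypes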
tamasgal/km3pipe | km3pipe/io/__init__.py | read_calibration | def read_calibration(
detx=None, det_id=None, from_file=False, det_id_table=None
):
"""Retrive calibration from file, the DB."""
from km3pipe.calib import Calibration # noqa
if not (detx or det_id or from_file):
return None
if detx is not None:
return Calibration(filename=detx)
if from_file:
det_ids = np.unique(det_id_table)
if len(det_ids) > 1:
log.critical("Multiple detector IDs found in events.")
det_id = det_ids[0]
if det_id is not None:
if det_id < 0:
log.warning(
"Negative detector ID found ({0}). This is a MC "
"detector and cannot be retrieved from the DB.".format(det_id)
)
return None
return Calibration(det_id=det_id)
return None | python | def read_calibration(
detx=None, det_id=None, from_file=False, det_id_table=None
):
"""Retrive calibration from file, the DB."""
from km3pipe.calib import Calibration # noqa
if not (detx or det_id or from_file):
return None
if detx is not None:
return Calibration(filename=detx)
if from_file:
det_ids = np.unique(det_id_table)
if len(det_ids) > 1:
log.critical("Multiple detector IDs found in events.")
det_id = det_ids[0]
if det_id is not None:
if det_id < 0:
log.warning(
"Negative detector ID found ({0}). This is a MC "
"detector and cannot be retrieved from the DB.".format(det_id)
)
return None
return Calibration(det_id=det_id)
return None | [
"def",
"read_calibration",
"(",
"detx",
"=",
"None",
",",
"det_id",
"=",
"None",
",",
"from_file",
"=",
"False",
",",
"det_id_table",
"=",
"None",
")",
":",
"from",
"km3pipe",
".",
"calib",
"import",
"Calibration",
"# noqa",
"if",
"not",
"(",
"detx",
"or",
"det_id",
"or",
"from_file",
")",
":",
"return",
"None",
"if",
"detx",
"is",
"not",
"None",
":",
"return",
"Calibration",
"(",
"filename",
"=",
"detx",
")",
"if",
"from_file",
":",
"det_ids",
"=",
"np",
".",
"unique",
"(",
"det_id_table",
")",
"if",
"len",
"(",
"det_ids",
")",
">",
"1",
":",
"log",
".",
"critical",
"(",
"\"Multiple detector IDs found in events.\"",
")",
"det_id",
"=",
"det_ids",
"[",
"0",
"]",
"if",
"det_id",
"is",
"not",
"None",
":",
"if",
"det_id",
"<",
"0",
":",
"log",
".",
"warning",
"(",
"\"Negative detector ID found ({0}). This is a MC \"",
"\"detector and cannot be retrieved from the DB.\"",
".",
"format",
"(",
"det_id",
")",
")",
"return",
"None",
"return",
"Calibration",
"(",
"det_id",
"=",
"det_id",
")",
"return",
"None"
] | Retrieve calibration from a file or the DB. | [
"Retrive",
"calibration",
"from",
"file",
"the",
"DB",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/__init__.py#L90-L113 | train |
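The precedence of the three calibration sources, sketched with invented paths and IDs:

calib = read_calibration(detx="km3net.detx")        # a detx file wins over everything else
det_ids = [29, 29, 29]                              # hypothetical per-event detector IDs
calib = read_calibration(from_file=True,
                         det_id_table=det_ids)      # det_id is taken from the event data
calib = read_calibration(det_id=29)                 # explicit detector ID, fetched from the DB
calib = read_calibration(det_id=-1)                 # MC detector: warns and returns None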
NaPs/Kolekto | kolekto/printer.py | KolektoPrinter.edit | def edit(self, text):
""" Edit a text using an external editor.
"""
if isinstance(text, unicode):
text = text.encode(self._encoding)
if self._editor is None:
printer.p('Warning: no editor found, skipping edit')
return text
with tempfile.NamedTemporaryFile(mode='w+', suffix='kolekto-edit') as ftmp:
ftmp.write(text)
ftmp.flush()
subprocess.Popen([self._editor, ftmp.name]).wait()
ftmp.seek(0)
edited = ftmp.read()
return edited | python | def edit(self, text):
""" Edit a text using an external editor.
"""
if isinstance(text, unicode):
text = text.encode(self._encoding)
if self._editor is None:
printer.p('Warning: no editor found, skipping edit')
return text
with tempfile.NamedTemporaryFile(mode='w+', suffix='kolekto-edit') as ftmp:
ftmp.write(text)
ftmp.flush()
subprocess.Popen([self._editor, ftmp.name]).wait()
ftmp.seek(0)
edited = ftmp.read()
return edited | [
"def",
"edit",
"(",
"self",
",",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"text",
"=",
"text",
".",
"encode",
"(",
"self",
".",
"_encoding",
")",
"if",
"self",
".",
"_editor",
"is",
"None",
":",
"printer",
".",
"p",
"(",
"'Warning: no editor found, skipping edit'",
")",
"return",
"text",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+'",
",",
"suffix",
"=",
"'kolekto-edit'",
")",
"as",
"ftmp",
":",
"ftmp",
".",
"write",
"(",
"text",
")",
"ftmp",
".",
"flush",
"(",
")",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"_editor",
",",
"ftmp",
".",
"name",
"]",
")",
".",
"wait",
"(",
")",
"ftmp",
".",
"seek",
"(",
"0",
")",
"edited",
"=",
"ftmp",
".",
"read",
"(",
")",
"return",
"edited"
] | Edit a text using an external editor. | [
"Edit",
"a",
"text",
"using",
"an",
"external",
"editor",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/printer.py#L276-L290 | train |
jdodds/feather | feather/application.py | Application.register | def register(self, plugin):
"""Take a feather.plugin.Plugin and tell our dispatcher about it.
Plugins are expected to provide a list of the messages that they
listen for and generate. If registering this plugin makes it so we have
at least one plugin listening for and generating our expected messages,
set self.valid to true
"""
self.needed_listeners -= plugin.listeners
self.needed_messengers -= plugin.messengers
if self.needed_messengers == self.needed_listeners == set():
self.valid = True
self.dispatcher.register(plugin) | python | def register(self, plugin):
"""Take a feather.plugin.Plugin and tell our dispatcher about it.
Plugins are expected to provide a list of the messages that they
listen for and generate. If registering this plugin makes it so we have
at least one plugin listening for and generating our expected messages,
set self.valid to true
"""
self.needed_listeners -= plugin.listeners
self.needed_messengers -= plugin.messengers
if self.needed_messengers == self.needed_listeners == set():
self.valid = True
self.dispatcher.register(plugin) | [
"def",
"register",
"(",
"self",
",",
"plugin",
")",
":",
"self",
".",
"needed_listeners",
"-=",
"plugin",
".",
"listeners",
"self",
".",
"needed_messengers",
"-=",
"plugin",
".",
"messengers",
"if",
"self",
".",
"needed_messengers",
"==",
"self",
".",
"needed_listeners",
"==",
"set",
"(",
")",
":",
"self",
".",
"valid",
"=",
"True",
"self",
".",
"dispatcher",
".",
"register",
"(",
"plugin",
")"
] | Take a feather.plugin.Plugin and tell our dispatcher about it.
Plugins are expected to provide a list of the messages that they
listen for and generate. If registering this plugin makes it so we have
at least one plugin listening for and generating our expected messages,
set self.valid to true | [
"Take",
"a",
"feather",
".",
"plugin",
".",
"Plugin",
"and",
"tell",
"our",
"dispatcher",
"about",
"it",
"."
] | 92a9426e692b33c7fddf758df8dbc99a9a1ba8ef | https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/application.py#L29-L43 | train |
jdodds/feather | feather/application.py | Application.start | def start(self):
"""If we have a set of plugins that provide our expected listeners and
messengers, tell our dispatcher to start up. Otherwise, raise
InvalidApplication
"""
if not self.valid:
err = ("\nMessengers and listeners that still need set:\n\n"
"messengers : %s\n\n"
"listeners : %s\n")
raise InvalidApplication(err % (self.needed_messengers,
self.needed_listeners))
self.dispatcher.start() | python | def start(self):
"""If we have a set of plugins that provide our expected listeners and
messengers, tell our dispatcher to start up. Otherwise, raise
InvalidApplication
"""
if not self.valid:
err = ("\nMessengers and listeners that still need set:\n\n"
"messengers : %s\n\n"
"listeners : %s\n")
raise InvalidApplication(err % (self.needed_messengers,
self.needed_listeners))
self.dispatcher.start() | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"valid",
":",
"err",
"=",
"(",
"\"\\nMessengers and listeners that still need set:\\n\\n\"",
"\"messengers : %s\\n\\n\"",
"\"listeners : %s\\n\"",
")",
"raise",
"InvalidApplication",
"(",
"err",
"%",
"(",
"self",
".",
"needed_messengers",
",",
"self",
".",
"needed_listeners",
")",
")",
"self",
".",
"dispatcher",
".",
"start",
"(",
")"
] | If we have a set of plugins that provide our expected listeners and
messengers, tell our dispatcher to start up. Otherwise, raise
InvalidApplication | [
"If",
"we",
"have",
"a",
"set",
"of",
"plugins",
"that",
"provide",
"our",
"expected",
"listeners",
"and",
"messengers",
"tell",
"our",
"dispatcher",
"to",
"start",
"up",
".",
"Otherwise",
"raise",
"InvalidApplication"
] | 92a9426e692b33c7fddf758df8dbc99a9a1ba8ef | https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/application.py#L45-L56 | train |
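Register and start in sequence, as a sketch; the constructor signature, plugin class and message names are all invented for illustration:

app = Application(messengers={'song_queued'}, listeners={'play'})   # assumed constructor
app.register(PlayerPlugin())   # a plugin exposing .listeners and .messengers sets
app.start()                    # raises InvalidApplication while any expected message is unserved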
baranbartu/rulengine | rulengine/conditions.py | execute_condition | def execute_condition(cond):
"""
Resolve the condition function for the given data type and
operator and apply it to the condition
"""
condition_method = 'rulengine.conditions.c_{0}_{1}'.format(
cond.data_type, cond.operator)
try:
func = import_class(condition_method)
except AttributeError:
condition_method = 'rulengine.conditions.c_{0}'.format(
cond.data_type)
func = import_class(condition_method)
executable_cond = convert_condition_to_executable(cond)
return func(executable_cond) | python | def execute_condition(cond):
"""
Get a rule instance for given operator and
return condition lambda func
"""
condition_method = 'rulengine.conditions.c_{0}_{1}'.format(
cond.data_type, cond.operator)
try:
func = import_class(condition_method)
except AttributeError:
condition_method = 'rulengine.conditions.c_{0}'.format(
cond.data_type)
func = import_class(condition_method)
executable_cond = convert_condition_to_executable(cond)
return func(executable_cond) | [
"def",
"execute_condition",
"(",
"cond",
")",
":",
"condition_method",
"=",
"'rulengine.conditions.c_{0}_{1}'",
".",
"format",
"(",
"cond",
".",
"data_type",
",",
"cond",
".",
"operator",
")",
"try",
":",
"func",
"=",
"import_class",
"(",
"condition_method",
")",
"except",
"AttributeError",
":",
"condition_method",
"=",
"'rulengine.conditions.c_{0}'",
".",
"format",
"(",
"cond",
".",
"data_type",
")",
"func",
"=",
"import_class",
"(",
"condition_method",
")",
"executable_cond",
"=",
"convert_condition_to_executable",
"(",
"cond",
")",
"return",
"func",
"(",
"executable_cond",
")"
] | Resolve the condition function for the given data type and
operator and apply it to the condition | [
"Get",
"a",
"rule",
"instance",
"for",
"given",
"operator",
"and",
"return",
"condition",
"lambda",
"func"
] | f4d1e6258927cb171cb7fc8a90a3cba546a2aee5 | https://github.com/baranbartu/rulengine/blob/f4d1e6258927cb171cb7fc8a90a3cba546a2aee5/rulengine/conditions.py#L5-L21 | train |
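The lookup order, spelled out for an existing condition object cond with data_type='str' and operator='eq' (hypothetical values):

# 1st try : rulengine.conditions.c_str_eq   (operator-specific condition)
# fallback: rulengine.conditions.c_str      (generic condition for the data type)
result = execute_condition(cond)             # i.e. func(convert_condition_to_executable(cond))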
abiiranathan/db2 | db2/session.py | Session.makemigrations | def makemigrations(self):
''' Do database migrations
1. Creates new tables from models
2. Updates tables and columns
Returns True if no exception is raised, otherwise the exception propagates
'''
UNCHANGED = []
with Session(self.settings) as conn:
cursor = conn.cursor()
for name, model in self.models.items():
print("Running migrations... on table: %s"%model.__name__.lower())
columns = self.description(model)
table = name.lower()
QUERY = "CREATE TABLE IF NOT EXISTS %s ("%table
for field, FieldType in model.columns.items():
QUERY += "%s %s, " % (field, FieldType)
# If no columns --> Table not created yet
if columns:
self.UpdateColums(cursor, field, FieldType,
model, columns, UNCHANGED)
QUERY = QUERY[:-2] + ") ENGINE=InnoDB"
print(QUERY)
try:
cursor.execute(QUERY)
except mysql.Error as e:
raise e
return True | python | def makemigrations(self):
''' Do database migrations
1. Creates new tables from models
2. Updates tables and columns
Returns True if no exception is raised, otherwise the exception propagates
'''
UNCHANGED = []
with Session(self.settings) as conn:
cursor = conn.cursor()
for name, model in self.models.items():
print("Running migrations... on table: %s"%model.__name__.lower())
columns = self.description(model)
table = name.lower()
QUERY = "CREATE TABLE IF NOT EXISTS %s ("%table
for field, FieldType in model.columns.items():
QUERY += "%s %s, " % (field, FieldType)
# If no columns --> Table not created yet
if columns:
self.UpdateColums(cursor, field, FieldType,
model, columns, UNCHANGED)
QUERY = QUERY[:-2] + ") ENGINE=InnoDB"
print(QUERY)
try:
cursor.execute(QUERY)
except mysql.Error as e:
raise e
return True | [
"def",
"makemigrations",
"(",
"self",
")",
":",
"UNCHANGED",
"=",
"[",
"]",
"with",
"Session",
"(",
"self",
".",
"settings",
")",
"as",
"conn",
":",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"for",
"name",
",",
"model",
"in",
"self",
".",
"models",
".",
"items",
"(",
")",
":",
"print",
"(",
"\"Running migrations... on table: %s\"",
"%",
"model",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"columns",
"=",
"self",
".",
"description",
"(",
"model",
")",
"table",
"=",
"name",
".",
"lower",
"(",
")",
"QUERY",
"=",
"\"CREATE TABLE IF NOT EXISTS %s (\"",
"%",
"table",
"for",
"field",
",",
"FieldType",
"in",
"model",
".",
"columns",
".",
"items",
"(",
")",
":",
"QUERY",
"+=",
"\"%s %s, \"",
"%",
"(",
"field",
",",
"FieldType",
")",
"# If no columns --> Table not created yet",
"if",
"columns",
":",
"self",
".",
"UpdateColums",
"(",
"cursor",
",",
"field",
",",
"FieldType",
",",
"model",
",",
"columns",
",",
"UNCHANGED",
")",
"QUERY",
"=",
"QUERY",
"[",
":",
"-",
"2",
"]",
"+",
"\") ENGINE=InnoDB\"",
"print",
"(",
"QUERY",
")",
"try",
":",
"cursor",
".",
"execute",
"(",
"QUERY",
")",
"except",
"mysql",
".",
"Error",
"as",
"e",
":",
"raise",
"e",
"return",
"True"
] | Do database migrations
1. Creates new tables from models
2. Updates tables and columns
Returns True if no exception is raised, otherwise the exception propagates | [
"Do",
"database",
"migrations",
"1",
".",
"Creates",
"new",
"tables",
"from",
"models",
"2",
".",
"Updates",
"columns",
"and",
"columns"
] | 347319e421921517bcae7639f524c3c3eb5446e6 | https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/session.py#L307-L342 | train |
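To make the string building concrete: for a model named User with columns {'id': 'INT PRIMARY KEY', 'name': 'VARCHAR(50)'} (hypothetical), the loop emits:

# CREATE TABLE IF NOT EXISTS user (id INT PRIMARY KEY, name VARCHAR(50)) ENGINE=InnoDB
# The trailing ", " is sliced off by QUERY[:-2] before ") ENGINE=InnoDB" is appended.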
abiiranathan/db2 | db2/session.py | Session.UpdateColums | def UpdateColums(self, cursor, field, FieldType, model, columns, UNCHANGED):
'''Updates the columns. Don't call directly
'''
table = model.__name__.lower()
if field not in columns:
n = UNCHANGED.pop()
new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}"
cursor.execute(new_sql)
print("\n\n", new_sql)
else:
UNCHANGED.append(field)
# We drop the fields in the table not in models
TCOLS = set(columns)-set(model._fields)
for col in TCOLS:
columns.remove(col)
QRY = f"ALTER TABLE {table} DROP COLUMN {col}"
cursor.execute(QRY)
print("\n\n", QRY)
return True | python | def UpdateColums(self, cursor, field, FieldType, model, columns, UNCHANGED):
'''Updates the columns. Don't call directly
'''
table = model.__name__.lower()
if field not in columns:
n = UNCHANGED.pop()
new_sql = f"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}"
cursor.execute(new_sql)
print("\n\n", new_sql)
else:
UNCHANGED.append(field)
# We drop the fields in the table not in models
TCOLS = set(columns)-set(model._fields)
for col in TCOLS:
columns.remove(col)
QRY = f"ALTER TABLE {table} DROP COLUMN {col}"
cursor.execute(QRY)
print("\n\n", QRY)
return True | [
"def",
"UpdateColums",
"(",
"self",
",",
"cursor",
",",
"field",
",",
"FieldType",
",",
"model",
",",
"columns",
",",
"UNCHANGED",
")",
":",
"table",
"=",
"model",
".",
"__name__",
".",
"lower",
"(",
")",
"if",
"field",
"not",
"in",
"columns",
":",
"n",
"=",
"UNCHANGED",
".",
"pop",
"(",
")",
"new_sql",
"=",
"f\"ALTER TABLE {table} ADD COLUMN {field} {FieldType} AFTER {n}\"",
"cursor",
".",
"execute",
"(",
"new_sql",
")",
"print",
"(",
"\"\\n\\n\"",
",",
"new_sql",
")",
"else",
":",
"UNCHANGED",
".",
"append",
"(",
"field",
")",
"# We drop the fields in the table not in models",
"TCOLS",
"=",
"set",
"(",
"columns",
")",
"-",
"set",
"(",
"model",
".",
"_fields",
")",
"for",
"col",
"in",
"TCOLS",
":",
"columns",
".",
"remove",
"(",
"col",
")",
"QRY",
"=",
"f\"ALTER TABLE {table} DROP COLUMN {col}\"",
"cursor",
".",
"execute",
"(",
"QRY",
")",
"print",
"(",
"\"\\n\\n\"",
",",
"QRY",
")",
"return",
"True"
] | Updates the columns. Don't call directly | [
"Updates",
"the",
"columns",
".",
"Dont",
"call",
"directly"
] | 347319e421921517bcae7639f524c3c3eb5446e6 | https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/session.py#L344-L364 | train |
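The two migration paths above, illustrated with hypothetical table and column names:

# a model field missing from the table is appended after the last unchanged column:
#   ALTER TABLE user ADD COLUMN email VARCHAR(100) AFTER name
# a table column absent from the model is dropped:
#   ALTER TABLE user DROP COLUMN legacy_flag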
tamasgal/km3pipe | km3pipe/srv.py | srv_event | def srv_event(token, hits, url=RBA_URL):
"""Serve event to RainbowAlga"""
if url is None:
log.error("Please provide a valid RainbowAlga URL.")
return
ws_url = url + '/message'
if isinstance(hits, pd.core.frame.DataFrame):
pos = [tuple(x) for x in hits[['x', 'y', 'z']].values]
time = list(hits['time'])
tot = list(hits['tot'])
elif isinstance(hits, Table):
pos = list(zip(hits.pos_x, hits.pos_y, hits.pos_z))
time = list(hits.time)
tot = list(hits.tot)
else:
log.error(
"No calibration information found in hits (type: {0})".format(
type(hits)
)
)
return
event = {
"hits": {
'pos': pos,
'time': time,
'tot': tot,
}
}
srv_data(ws_url, token, event, 'event') | python | def srv_event(token, hits, url=RBA_URL):
"""Serve event to RainbowAlga"""
if url is None:
log.error("Please provide a valid RainbowAlga URL.")
return
ws_url = url + '/message'
if isinstance(hits, pd.core.frame.DataFrame):
pos = [tuple(x) for x in hits[['x', 'y', 'z']].values]
time = list(hits['time'])
tot = list(hits['tot'])
elif isinstance(hits, Table):
pos = list(zip(hits.pos_x, hits.pos_y, hits.pos_z))
time = list(hits.time)
tot = list(hits.tot)
else:
log.error(
"No calibration information found in hits (type: {0})".format(
type(hits)
)
)
return
event = {
"hits": {
'pos': pos,
'time': time,
'tot': tot,
}
}
srv_data(ws_url, token, event, 'event') | [
"def",
"srv_event",
"(",
"token",
",",
"hits",
",",
"url",
"=",
"RBA_URL",
")",
":",
"if",
"url",
"is",
"None",
":",
"log",
".",
"error",
"(",
"\"Please provide a valid RainbowAlga URL.\"",
")",
"return",
"ws_url",
"=",
"url",
"+",
"'/message'",
"if",
"isinstance",
"(",
"hits",
",",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
")",
":",
"pos",
"=",
"[",
"tuple",
"(",
"x",
")",
"for",
"x",
"in",
"hits",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
".",
"values",
"]",
"time",
"=",
"list",
"(",
"hits",
"[",
"'time'",
"]",
")",
"tot",
"=",
"list",
"(",
"hits",
"[",
"'tot'",
"]",
")",
"elif",
"isinstance",
"(",
"hits",
",",
"Table",
")",
":",
"pos",
"=",
"list",
"(",
"zip",
"(",
"hits",
".",
"pos_x",
",",
"hits",
".",
"pos_y",
",",
"hits",
".",
"pos_z",
")",
")",
"time",
"=",
"list",
"(",
"hits",
".",
"time",
")",
"tot",
"=",
"list",
"(",
"hits",
".",
"tot",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"No calibration information found in hits (type: {0})\"",
".",
"format",
"(",
"type",
"(",
"hits",
")",
")",
")",
"return",
"event",
"=",
"{",
"\"hits\"",
":",
"{",
"'pos'",
":",
"pos",
",",
"'time'",
":",
"time",
",",
"'tot'",
":",
"tot",
",",
"}",
"}",
"srv_data",
"(",
"ws_url",
",",
"token",
",",
"event",
",",
"'event'",
")"
] | Serve event to RainbowAlga | [
"Serve",
"event",
"to",
"RainbowAlga"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L289-L322 | train |
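For reference, the payload srv_event assembles before handing it to srv_data (values invented):

event = {
    "hits": {
        "pos": [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],   # calibrated hit positions
        "time": [12.5, 47.1],
        "tot": [26, 31],
    }
}
# srv_data wraps this as {'token': token, 'data': event, 'kind': 'event'}
# and sends it over the websocket as JSON.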
tamasgal/km3pipe | km3pipe/srv.py | srv_data | def srv_data(url, token, data, kind):
"""Serve data to RainbowAlga"""
ws = websocket.create_connection(url)
message = {'token': token, 'data': data, 'kind': kind}
ws.send(pd.io.json.dumps(message))
ws.close() | python | def srv_data(url, token, data, kind):
"""Serve data to RainbowAlga"""
ws = websocket.create_connection(url)
message = {'token': token, 'data': data, 'kind': kind}
ws.send(pd.io.json.dumps(message))
ws.close() | [
"def",
"srv_data",
"(",
"url",
",",
"token",
",",
"data",
",",
"kind",
")",
":",
"ws",
"=",
"websocket",
".",
"create_connection",
"(",
"url",
")",
"message",
"=",
"{",
"'token'",
":",
"token",
",",
"'data'",
":",
"data",
",",
"'kind'",
":",
"kind",
"}",
"ws",
".",
"send",
"(",
"pd",
".",
"io",
".",
"json",
".",
"dumps",
"(",
"message",
")",
")",
"ws",
".",
"close",
"(",
")"
] | Serve data to RainbowAlga | [
"Serve",
"data",
"to",
"RainbowAlga"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L325-L330 | train |
tamasgal/km3pipe | km3pipe/srv.py | ClientManager.raw_message_to | def raw_message_to(self, token, message):
"""Convert message to JSON and send it to the client with token"""
if token not in self._clients:
log.critical("Client with token '{0}' not found!".format(token))
return
client = self._clients[token]
try:
client.write_message(message)
except (AttributeError, tornado.websocket.WebSocketClosedError):
log.error("Lost connection to client '{0}'".format(client))
else:
print("Sent {0} bytes.".format(len(message))) | python | def raw_message_to(self, token, message):
"""Convert message to JSON and send it to the client with token"""
if token not in self._clients:
log.critical("Client with token '{0}' not found!".format(token))
return
client = self._clients[token]
try:
client.write_message(message)
except (AttributeError, tornado.websocket.WebSocketClosedError):
log.error("Lost connection to client '{0}'".format(client))
else:
print("Sent {0} bytes.".format(len(message))) | [
"def",
"raw_message_to",
"(",
"self",
",",
"token",
",",
"message",
")",
":",
"if",
"token",
"not",
"in",
"self",
".",
"_clients",
":",
"log",
".",
"critical",
"(",
"\"Client with token '{0}' not found!\"",
".",
"format",
"(",
"token",
")",
")",
"return",
"client",
"=",
"self",
".",
"_clients",
"[",
"token",
"]",
"try",
":",
"client",
".",
"write_message",
"(",
"message",
")",
"except",
"(",
"AttributeError",
",",
"tornado",
".",
"websocket",
".",
"WebSocketClosedError",
")",
":",
"log",
".",
"error",
"(",
"\"Lost connection to client '{0}'\"",
".",
"format",
"(",
"client",
")",
")",
"else",
":",
"print",
"(",
"\"Sent {0} bytes.\"",
".",
"format",
"(",
"len",
"(",
"message",
")",
")",
")"
] | Send a raw message to the client with the given token | [
"Convert",
"message",
"to",
"JSON",
"and",
"send",
"it",
"to",
"the",
"client",
"with",
"token"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L114-L125 | train |
tamasgal/km3pipe | km3pipe/srv.py | EchoWebSocket.message | def message(self, data, kind="info"):
"""Convert message to json and send it to the clients"""
message = pd.io.json.dumps({'kind': kind, 'data': data})
print("Sent {0} bytes.".format(len(message)))
self.write_message(message) | python | def message(self, data, kind="info"):
"""Convert message to json and send it to the clients"""
message = pd.io.json.dumps({'kind': kind, 'data': data})
print("Sent {0} bytes.".format(len(message)))
self.write_message(message) | [
"def",
"message",
"(",
"self",
",",
"data",
",",
"kind",
"=",
"\"info\"",
")",
":",
"message",
"=",
"pd",
".",
"io",
".",
"json",
".",
"dumps",
"(",
"{",
"'kind'",
":",
"kind",
",",
"'data'",
":",
"data",
"}",
")",
"print",
"(",
"\"Sent {0} bytes.\"",
".",
"format",
"(",
"len",
"(",
"message",
")",
")",
")",
"self",
".",
"write_message",
"(",
"message",
")"
] | Convert message to json and send it to the clients | [
"Convert",
"message",
"to",
"json",
"and",
"send",
"it",
"to",
"the",
"clients"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/srv.py#L282-L286 | train |
vslutov/turingmarkov | turingmarkov/markov.py | Algorithm.execute_once | def execute_once(self, string):
"""Execute only one rule."""
for rule in self.rules:
if rule[0] in string:
pos = string.find(rule[0])
self.last_rule = rule
return string[:pos] + rule[1] + string[pos+len(rule[0]):]
self.last_rule = None
return string | python | def execute_once(self, string):
"""Execute only one rule."""
for rule in self.rules:
if rule[0] in string:
pos = string.find(rule[0])
self.last_rule = rule
return string[:pos] + rule[1] + string[pos+len(rule[0]):]
self.last_rule = None
return string | [
"def",
"execute_once",
"(",
"self",
",",
"string",
")",
":",
"for",
"rule",
"in",
"self",
".",
"rules",
":",
"if",
"rule",
"[",
"0",
"]",
"in",
"string",
":",
"pos",
"=",
"string",
".",
"find",
"(",
"rule",
"[",
"0",
"]",
")",
"self",
".",
"last_rule",
"=",
"rule",
"return",
"string",
"[",
":",
"pos",
"]",
"+",
"rule",
"[",
"1",
"]",
"+",
"string",
"[",
"pos",
"+",
"len",
"(",
"rule",
"[",
"0",
"]",
")",
":",
"]",
"self",
".",
"last_rule",
"=",
"None",
"return",
"string"
] | Execute only one rule. | [
"Execute",
"only",
"one",
"rule",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/markov.py#L51-L59 | train |
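A tiny worked example of the leftmost-match, single-step semantics; it assumes a no-argument Algorithm constructor and the "pattern->replacement" add_rule syntax shown in compile() below:

algo = Algorithm()
algo.add_rule('ab->b')               # '->' marks a non-terminating rule
print(algo.execute_once('aab'))      # -> 'ab': only the leftmost occurrence is rewritten
print(algo.last_rule)                # -> ('ab', 'b', False)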
vslutov/turingmarkov | turingmarkov/markov.py | Algorithm.compile | def compile(self):
"""Return python code for create and execute algo."""
result = TEMPLATE
for rule in self.rules:
if rule[2]:
arrow = '=>'
else:
arrow = '->'
repr_rule = repr(rule[0] + arrow + rule[1])
result += "algo.add_rule({repr_rule})\n".format(repr_rule=repr_rule)
result += "for line in stdin:\n"
result += " print(algo.execute(''.join(line.split())))"
return result | python | def compile(self):
"""Return python code for create and execute algo."""
result = TEMPLATE
for rule in self.rules:
if rule[2]:
arrow = '=>'
else:
arrow = '->'
repr_rule = repr(rule[0] + arrow + rule[1])
result += "algo.add_rule({repr_rule})\n".format(repr_rule=repr_rule)
result += "for line in stdin:\n"
result += " print(algo.execute(''.join(line.split())))"
return result | [
"def",
"compile",
"(",
"self",
")",
":",
"result",
"=",
"TEMPLATE",
"for",
"rule",
"in",
"self",
".",
"rules",
":",
"if",
"rule",
"[",
"2",
"]",
":",
"arrow",
"=",
"'=>'",
"else",
":",
"arrow",
"=",
"'->'",
"repr_rule",
"=",
"repr",
"(",
"rule",
"[",
"0",
"]",
"+",
"arrow",
"+",
"rule",
"[",
"1",
"]",
")",
"result",
"+=",
"\"algo.add_rule({repr_rule})\\n\"",
".",
"format",
"(",
"repr_rule",
"=",
"repr_rule",
")",
"result",
"+=",
"\"for line in stdin:\\n\"",
"result",
"+=",
"\" print(algo.execute(''.join(line.split())))\"",
"return",
"result"
] | Return python code that creates and executes the algorithm. | [
"Return",
"python",
"code",
"for",
"create",
"and",
"execute",
"algo",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/markov.py#L76-L91 | train |
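The shape of the generated program, for a single non-terminating rule 'ab->b' (TEMPLATE is the module-level preamble that imports stdin and creates algo):

# <TEMPLATE: imports, algo = Algorithm()>
# algo.add_rule('ab->b')
# for line in stdin:
#     print(algo.execute(''.join(line.split())))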
IRC-SPHERE/HyperStream | hyperstream/factor/factor.py | Factor.get_sources | def get_sources(self, plate, plate_value, sources=None):
"""
Gets the source streams for a given plate value on a plate.
Also populates with source streams that are valid for the parent plates of this plate,
with the appropriate meta-data for the parent plate.
:param plate: The plate being operated on
:param plate_value: The specific plate value of interest
:param sources: The currently found sources (for recursion)
:return: The appropriate source streams
:type plate: Plate
:type plate_value: tuple
:type sources: list[Stream] | None
"""
if sources is None:
sources = []
if self.sources:
for si, source in enumerate(self.sources):
if len(source.streams) == 1 and None in source.streams:
sources.append(source.streams[None])
elif plate_value in source.streams:
sources.append(source.streams[plate_value])
else:
# # TODO - determine whether this should raise an exception or not, or even log a warning
# logging.warn("{} with value {} not valid for source {}"
# .format(plate, plate_value, source))
pass
if not plate.is_root:
# Populate with sources defined on parent plate
parent_plate_value = tuple(pv for pv in plate_value if pv[0] != plate.meta_data_id)
sources = self.get_sources(plate.parent, parent_plate_value, sources)
# sources.extend(self.get_global_sources())
return sources | python | def get_sources(self, plate, plate_value, sources=None):
"""
Gets the source streams for a given plate value on a plate.
Also populates with source streams that are valid for the parent plates of this plate,
with the appropriate meta-data for the parent plate.
:param plate: The plate being operated on
:param plate_value: The specific plate value of interest
:param sources: The currently found sources (for recursion)
:return: The appropriate source streams
:type plate: Plate
:type plate_value: tuple
:type sources: list[Stream] | None
"""
if sources is None:
sources = []
if self.sources:
for si, source in enumerate(self.sources):
if len(source.streams) == 1 and None in source.streams:
sources.append(source.streams[None])
elif plate_value in source.streams:
sources.append(source.streams[plate_value])
else:
# # TODO - determine whether this should raise an exception or not, or even log a warning
# logging.warn("{} with value {} not valid for source {}"
# .format(plate, plate_value, source))
pass
if not plate.is_root:
# Populate with sources defined on parent plate
parent_plate_value = tuple(pv for pv in plate_value if pv[0] != plate.meta_data_id)
sources = self.get_sources(plate.parent, parent_plate_value, sources)
# sources.extend(self.get_global_sources())
return sources | [
"def",
"get_sources",
"(",
"self",
",",
"plate",
",",
"plate_value",
",",
"sources",
"=",
"None",
")",
":",
"if",
"sources",
"is",
"None",
":",
"sources",
"=",
"[",
"]",
"if",
"self",
".",
"sources",
":",
"for",
"si",
",",
"source",
"in",
"enumerate",
"(",
"self",
".",
"sources",
")",
":",
"if",
"len",
"(",
"source",
".",
"streams",
")",
"==",
"1",
"and",
"None",
"in",
"source",
".",
"streams",
":",
"sources",
".",
"append",
"(",
"source",
".",
"streams",
"[",
"None",
"]",
")",
"elif",
"plate_value",
"in",
"source",
".",
"streams",
":",
"sources",
".",
"append",
"(",
"source",
".",
"streams",
"[",
"plate_value",
"]",
")",
"else",
":",
"# # TODO - determine whether this should raise an exception or not, or even log a warning",
"# logging.warn(\"{} with value {} not valid for source {}\"",
"# .format(plate, plate_value, source))",
"pass",
"if",
"not",
"plate",
".",
"is_root",
":",
"# Populate with sources defined on parent plate",
"parent_plate_value",
"=",
"tuple",
"(",
"pv",
"for",
"pv",
"in",
"plate_value",
"if",
"pv",
"[",
"0",
"]",
"!=",
"plate",
".",
"meta_data_id",
")",
"sources",
"=",
"self",
".",
"get_sources",
"(",
"plate",
".",
"parent",
",",
"parent_plate_value",
",",
"sources",
")",
"# sources.extend(self.get_global_sources())",
"return",
"sources"
] | Gets the source streams for a given plate value on a plate.
Also populates with source streams that are valid for the parent plates of this plate,
with the appropriate meta-data for the parent plate.
:param plate: The plate being operated on
:param plate_value: The specific plate value of interest
:param sources: The currently found sources (for recursion)
:return: The appropriate source streams
:type plate: Plate
:type plate_value: tuple
:type sources: list[Stream] | None | [
"Gets",
"the",
"source",
"streams",
"for",
"a",
"given",
"plate",
"value",
"on",
"a",
"plate",
".",
"Also",
"populates",
"with",
"source",
"streams",
"that",
"are",
"valid",
"for",
"the",
"parent",
"plates",
"of",
"this",
"plate",
"with",
"the",
"appropriate",
"meta",
"-",
"data",
"for",
"the",
"parent",
"plate",
"."
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L204-L240 | train |
IRC-SPHERE/HyperStream | hyperstream/factor/factor.py | Factor.get_global_sources | def get_global_sources(self):
"""
Gets streams that live outside of the plates
:return: Global streams
"""
sources = []
if self.sources:
for source in self.sources:
if None in source.streams:
sources.append(source.streams[None])
return sources | python | def get_global_sources(self):
"""
Gets streams that live outside of the plates
:return: Global streams
"""
sources = []
if self.sources:
for source in self.sources:
if None in source.streams:
sources.append(source.streams[None])
return sources | [
"def",
"get_global_sources",
"(",
"self",
")",
":",
"sources",
"=",
"[",
"]",
"if",
"self",
".",
"sources",
":",
"for",
"source",
"in",
"self",
".",
"sources",
":",
"if",
"None",
"in",
"source",
".",
"streams",
":",
"sources",
".",
"append",
"(",
"source",
".",
"streams",
"[",
"None",
"]",
")",
"return",
"sources"
] | Gets streams that live outside of the plates
:return: Global streams | [
"Gets",
"streams",
"that",
"live",
"outside",
"of",
"the",
"plates"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L242-L253 | train |
IRC-SPHERE/HyperStream | hyperstream/factor/factor.py | Factor.get_alignment_stream | def get_alignment_stream(self, plate=None, plate_value=None):
"""
Gets the alignment stream for a particular plate value
:param plate: The plate on which the alignment node lives
:param plate_value: The plate value to select the stream from the node
:return: The alignment stream
"""
if not self.alignment_node:
return None
if plate is not None or plate_value is not None:
# TODO: Need to implement alignment nodes that live inside plates
raise NotImplementedError("Currently only alignment nodes outside of plates are supported")
return self.alignment_node.streams[plate] | python | def get_alignment_stream(self, plate=None, plate_value=None):
"""
Gets the alignment stream for a particular plate value
:param plate: The plate on which the alignment node lives
:param plate_value: The plate value to select the stream from the node
:return: The alignment stream
"""
if not self.alignment_node:
return None
if plate is not None or plate_value is not None:
# TODO: Need to implement alignment nodes that live inside plates
raise NotImplementedError("Currently only alignment nodes outside of plates are supported")
return self.alignment_node.streams[plate] | [
"def",
"get_alignment_stream",
"(",
"self",
",",
"plate",
"=",
"None",
",",
"plate_value",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"alignment_node",
":",
"return",
"None",
"if",
"plate",
"is",
"not",
"None",
"or",
"plate_value",
"is",
"not",
"None",
":",
"# TODO: Need to implement alignment nodes that live inside plates",
"raise",
"NotImplementedError",
"(",
"\"Currently only alignment nodes outside of plates are supported\"",
")",
"return",
"self",
".",
"alignment_node",
".",
"streams",
"[",
"plate",
"]"
] | Gets the alignment stream for a particular plate value
:param plate: The plate on which the alignment node lives
:param plate_value: The plate value to select the stream from the node
:return: The alignment stream | [
"Gets",
"the",
"alignment",
"stream",
"for",
"a",
"particular",
"plate",
"value"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L255-L268 | train |
IRC-SPHERE/HyperStream | hyperstream/factor/factor.py | MultiOutputFactor.get_splitting_stream | def get_splitting_stream(self, input_plate_value):
"""
Get the splitting stream
:param input_plate_value: The input plate value
:return: The splitting stream
"""
if not self.splitting_node:
return None
if len(self.splitting_node.plates) == 0:
# Use global plate value
return self.splitting_node.streams[None]
if len(self.splitting_node.plates) > 1:
raise ValueError("Splitting node cannot live on multiple plates for factor {}"
.format(self.factor_id))
# now len(self.splitting_node.plates) == 1:
if not self.input_plate and len(self.splitting_node.plates) > 0:
raise ValueError("Splitting node cannot live on a plate if there is no input plate")
splitting_plate = self.splitting_node.plates[0]
if self.input_plate == splitting_plate:
# Use matching plate value
splitting_stream = self.splitting_node.streams[input_plate_value]
else:
# First check if it's a direct child
if splitting_plate.is_child(self.input_plate):
ppv = list(filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values))
if len(ppv) != 1:
raise ValueError("Parent plate value not found")
splitting_stream = self.splitting_node.streams[ppv[0]]
# Then more generally if it's a descendant
elif splitting_plate.is_descendant(self.input_plate):
# Here we need to find the splitting plate value that is valid for the
# current input plate value
# TODO: This needs checking - is the logic still the same as for the case above?
ppv = list(filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values))
if len(ppv) != 1:
raise ValueError("Parent plate value not found")
splitting_stream = self.splitting_node.streams[ppv]
else:
raise IncompatiblePlatesError(
"Splitting node plate {} does not match input plate {} for factor {}"
.format(self.input_plate, self.splitting_node.plates[0], self.factor_id))
return splitting_stream | python | def get_splitting_stream(self, input_plate_value):
"""
Get the splitting stream
:param input_plate_value: The input plate value
:return: The splitting stream
"""
if not self.splitting_node:
return None
if len(self.splitting_node.plates) == 0:
# Use global plate value
return self.splitting_node.streams[None]
if len(self.splitting_node.plates) > 1:
raise ValueError("Splitting node cannot live on multiple plates for factor {}"
.format(self.factor_id))
# now len(self.splitting_node.plates) == 1:
if not self.input_plate and len(self.splitting_node.plates) > 0:
raise ValueError("Splitting node cannot live on a plate if there is no input plate")
splitting_plate = self.splitting_node.plates[0]
if self.input_plate == splitting_plate:
# Use matching plate value
splitting_stream = self.splitting_node.streams[input_plate_value]
else:
# First check if it's a direct child
if splitting_plate.is_child(self.input_plate):
ppv = list(filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values))
if len(ppv) != 1:
raise ValueError("Parent plate value not found")
splitting_stream = self.splitting_node.streams[ppv[0]]
# Then more generally if it's a descendant
elif splitting_plate.is_descendant(self.input_plate):
# Here we need to find the splitting plate value that is valid for the
# current input plate value
# TODO: This needs checking - is the logic still the same as for the case above?
ppv = filter(lambda x: all(p in input_plate_value for p in x), self.input_plate.parent.values)
if len(ppv) != 1:
raise ValueError("Parent plate value not found")
splitting_stream = self.splitting_node.streams[ppv]
else:
raise IncompatiblePlatesError(
"Splitting node plate {} does not match input plate {} for factor {}"
.format(self.input_plate, self.splitting_node.plates[0], self.factor_id))
return splitting_stream | [
"def",
"get_splitting_stream",
"(",
"self",
",",
"input_plate_value",
")",
":",
"if",
"not",
"self",
".",
"splitting_node",
":",
"return",
"None",
"if",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
"==",
"0",
":",
"# Use global plate value",
"return",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"None",
"]",
"if",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Splitting node cannot live on multiple plates for factor {}\"",
".",
"format",
"(",
"self",
".",
"factor_id",
")",
")",
"# now len(self.splitting_node.plates) == 1:",
"if",
"not",
"self",
".",
"input_plate",
"and",
"len",
"(",
"self",
".",
"splitting_node",
".",
"plates",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Splitting node cannot live on a plate if there is no input plate\"",
")",
"splitting_plate",
"=",
"self",
".",
"splitting_node",
".",
"plates",
"[",
"0",
"]",
"if",
"self",
".",
"input_plate",
"==",
"splitting_plate",
":",
"# Use matching plate value",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"input_plate_value",
"]",
"else",
":",
"# First check if it's a direct child",
"if",
"splitting_plate",
".",
"is_child",
"(",
"self",
".",
"input_plate",
")",
":",
"ppv",
"=",
"filter",
"(",
"lambda",
"x",
":",
"all",
"(",
"p",
"in",
"input_plate_value",
"for",
"p",
"in",
"x",
")",
",",
"self",
".",
"input_plate",
".",
"parent",
".",
"values",
")",
"if",
"len",
"(",
"ppv",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Parent plate value not found\"",
")",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"ppv",
"[",
"0",
"]",
"]",
"# Then more generally if it's a descendant",
"elif",
"splitting_plate",
".",
"is_descendant",
"(",
"self",
".",
"input_plate",
")",
":",
"# Here we need to find the splitting plate value that is valid for the",
"# current input plate value",
"# TODO: This needs checking - is the logic still the same as for the case above?",
"ppv",
"=",
"filter",
"(",
"lambda",
"x",
":",
"all",
"(",
"p",
"in",
"input_plate_value",
"for",
"p",
"in",
"x",
")",
",",
"self",
".",
"input_plate",
".",
"parent",
".",
"values",
")",
"if",
"len",
"(",
"ppv",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Parent plate value not found\"",
")",
"splitting_stream",
"=",
"self",
".",
"splitting_node",
".",
"streams",
"[",
"ppv",
"]",
"else",
":",
"raise",
"IncompatiblePlatesError",
"(",
"\"Splitting node plate {} does not match input plate {} for factor {}\"",
".",
"format",
"(",
"self",
".",
"input_plate",
",",
"self",
".",
"splitting_node",
".",
"plates",
"[",
"0",
"]",
",",
"self",
".",
"factor_id",
")",
")",
"return",
"splitting_stream"
] | Get the splitting stream
:param input_plate_value: The input plate value
:return: The splitting stream | [
"Get",
"the",
"splitting",
"stream"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L390-L436 | train |
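A note on the lookup above: the parent plate value is found by filtering the input plate's parent values, and under Python 3 the bare filter() call would return an iterator, so the len(ppv) check would fail. A minimal sketch of the same lookup with a list comprehension; the plate values and meta-data keys below are made up:

    input_plate_value = (("house", "1"), ("resident", "2"))
    parent_values = [(("house", "1"),), (("house", "3"),)]  # hypothetical parent plate values

    ppv = [v for v in parent_values if all(p in input_plate_value for p in v)]
    assert len(ppv) == 1 and ppv[0] == (("house", "1"),)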
IRC-SPHERE/HyperStream | hyperstream/factor/factor.py | MultiOutputFactor.update_computed_intervals | def update_computed_intervals(sinks, time_interval):
"""
Update computed intervals
:param sinks: The streams to update
:param time_interval: The time interval
"""
for sink in sinks:
sink.calculated_intervals += time_interval
required_intervals = TimeIntervals([time_interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
raise RuntimeError('Tool execution did not cover the time interval {}'
.format(required_intervals)) | python | def update_computed_intervals(sinks, time_interval):
"""
Update computed intervals
:param sinks: The streams to update
:param time_interval: The time interval
"""
for sink in sinks:
sink.calculated_intervals += time_interval
required_intervals = TimeIntervals([time_interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
raise RuntimeError('Tool execution did not cover the time interval {}'
.format(required_intervals)) | [
"def",
"update_computed_intervals",
"(",
"sinks",
",",
"time_interval",
")",
":",
"for",
"sink",
"in",
"sinks",
":",
"sink",
".",
"calculated_intervals",
"+=",
"time_interval",
"required_intervals",
"=",
"TimeIntervals",
"(",
"[",
"time_interval",
"]",
")",
"-",
"sink",
".",
"calculated_intervals",
"if",
"not",
"required_intervals",
".",
"is_empty",
":",
"raise",
"RuntimeError",
"(",
"'Tool execution did not cover the time interval {}'",
".",
"format",
"(",
"required_intervals",
")",
")"
] | Update computed intervals
:param sinks: The streams to update
:param time_interval: The time interval | [
"Update",
"computed",
"intervals"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/factor/factor.py#L439-L451 | train |
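The coverage check above can be pictured with plain (start, end) tuples in place of HyperStream's TimeIntervals; this sketch only illustrates the invariant being enforced, not the real interval arithmetic:

    def covers(calculated, requested):
        # True if the requested interval lies wholly inside one calculated interval
        return any(s <= requested[0] and requested[1] <= e for s, e in calculated)

    calculated = [(0, 10), (20, 30)]
    assert covers(calculated, (2, 8))
    assert not covers(calculated, (8, 22))  # would trigger the RuntimeError above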
lexibank/pylexibank | src/pylexibank/util.py | getEvoBibAsBibtex | def getEvoBibAsBibtex(*keys, **kw):
"""Download bibtex format and parse it from EvoBib"""
res = []
for key in keys:
bib = get_url(
"http://bibliography.lingpy.org/raw.php?key=" + key,
log=kw.get('log')).text
try:
res.append('@' + bib.split('@')[1].split('</pre>')[0])
except IndexError: # pragma: no cover
res.append('@misc{' + key + ',\nNote={missing source}\n\n}')
return '\n\n'.join(res) | python | def getEvoBibAsBibtex(*keys, **kw):
"""Download bibtex format and parse it from EvoBib"""
res = []
for key in keys:
bib = get_url(
"http://bibliography.lingpy.org/raw.php?key=" + key,
log=kw.get('log')).text
try:
res.append('@' + bib.split('@')[1].split('</pre>')[0])
except IndexError: # pragma: no cover
res.append('@misc{' + key + ',\nNote={missing source}\n\n}')
return '\n\n'.join(res) | [
"def",
"getEvoBibAsBibtex",
"(",
"*",
"keys",
",",
"*",
"*",
"kw",
")",
":",
"res",
"=",
"[",
"]",
"for",
"key",
"in",
"keys",
":",
"bib",
"=",
"get_url",
"(",
"\"http://bibliography.lingpy.org/raw.php?key=\"",
"+",
"key",
",",
"log",
"=",
"kw",
".",
"get",
"(",
"'log'",
")",
")",
".",
"text",
"try",
":",
"res",
".",
"append",
"(",
"'@'",
"+",
"bib",
".",
"split",
"(",
"'@'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'</pre>'",
")",
"[",
"0",
"]",
")",
"except",
"IndexError",
":",
"# pragma: no cover",
"res",
".",
"append",
"(",
"'@misc{'",
"+",
"key",
"+",
"',\\nNote={missing source}\\n\\n}'",
")",
"return",
"'\\n\\n'",
".",
"join",
"(",
"res",
")"
] | Download bibtex format and parse it from EvoBib | [
"Download",
"bibtex",
"format",
"and",
"parse",
"it",
"from",
"EvoBib"
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L233-L244 | train |
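A hedged usage sketch; the entry keys below are hypothetical EvoBib keys, not ones confirmed to exist:

    bibtex = getEvoBibAsBibtex('Swadesh1952', 'List2014')
    with open('sources.bib', 'w') as fp:
        fp.write(bibtex)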
lexibank/pylexibank | src/pylexibank/util.py | DataDir.download_and_unpack | def download_and_unpack(self, url, *paths, **kw):
"""
Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return:
"""
with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
with TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(zipp.as_posix()) as zipf:
for path in paths:
zipf.extract(as_posix(path), path=tmpdir.as_posix())
copy(tmpdir.joinpath(path), self) | python | def download_and_unpack(self, url, *paths, **kw):
"""
Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return:
"""
with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
with TemporaryDirectory() as tmpdir:
with zipfile.ZipFile(zipp.as_posix()) as zipf:
for path in paths:
zipf.extract(as_posix(path), path=tmpdir.as_posix())
copy(tmpdir.joinpath(path), self) | [
"def",
"download_and_unpack",
"(",
"self",
",",
"url",
",",
"*",
"paths",
",",
"*",
"*",
"kw",
")",
":",
"with",
"self",
".",
"temp_download",
"(",
"url",
",",
"'ds.zip'",
",",
"log",
"=",
"kw",
".",
"pop",
"(",
"'log'",
",",
"None",
")",
")",
"as",
"zipp",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmpdir",
":",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zipp",
".",
"as_posix",
"(",
")",
")",
"as",
"zipf",
":",
"for",
"path",
"in",
"paths",
":",
"zipf",
".",
"extract",
"(",
"as_posix",
"(",
"path",
")",
",",
"path",
"=",
"tmpdir",
".",
"as_posix",
"(",
")",
")",
"copy",
"(",
"tmpdir",
".",
"joinpath",
"(",
"path",
")",
",",
"self",
")"
] | Download a zipfile and immediately unpack selected content.
:param url:
:param paths:
:param kw:
:return: | [
"Download",
"a",
"zipfile",
"and",
"immediately",
"unpack",
"selected",
"content",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/util.py#L216-L230 | train |
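A hedged usage sketch; the URL, the archive member paths, and the assumption that DataDir can be constructed from a directory path are all illustrative:

    raw = DataDir('raw')
    raw.download_and_unpack(
        'https://example.org/dataset.zip',
        'data/forms.csv',
        'data/languages.csv',
    )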
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmc.getRanking | def getRanking(self, profile, sampleFileName = None):
"""
Returns a list of lists that orders all candidates in tiers from best to worst when we use
MCMC approximation to compute Bayesian utilities for an election profile.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: An optional argument for the name of the input file containing
sample data. If a file name is given, this method will use the samples in the file
instead of generating samples itself.
"""
if sampleFileName != None:
candScoresMap = self.getCandScoresMapFromSamplesFile(profile, sampleFileName)
else:
candScoresMap = self.getCandScoresMap(profile)
# We generate a map that associates each score with the candidates that have that score.
reverseCandScoresMap = dict()
for key, value in candScoresMap.items():
if value not in reverseCandScoresMap.keys():
reverseCandScoresMap[value] = [key]
else:
reverseCandScoresMap[value].append(key)
# We sort the scores by either decreasing order or increasing order.
if self.maximizeCandScore == True:
sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
else:
sortedCandScores = sorted(reverseCandScoresMap.keys())
# We put the candidates into our ranking based on the order in which their score appears
ranking = []
for candScore in sortedCandScores:
for cand in reverseCandScoresMap[candScore]:
ranking.append(cand)
return ranking | python | def getRanking(self, profile, sampleFileName = None):
"""
Returns a list of lists that orders all candidates in tiers from best to worst when we use
MCMC approximation to compute Bayesian utilities for an election profile.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: An optional argument for the name of the input file containing
sample data. If a file name is given, this method will use the samples in the file
instead of generating samples itself.
"""
if sampleFileName != None:
candScoresMap = self.getCandScoresMapFromSamplesFile(profile, sampleFileName)
else:
candScoresMap = self.getCandScoresMap(profile)
# We generate a map that associates each score with the candidates that have that score.
reverseCandScoresMap = dict()
for key, value in candScoresMap.items():
if value not in reverseCandScoresMap.keys():
reverseCandScoresMap[value] = [key]
else:
reverseCandScoresMap[value].append(key)
# We sort the scores by either decreasing order or increasing order.
if self.maximizeCandScore == True:
sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
else:
sortedCandScores = sorted(reverseCandScoresMap.keys())
# We put the candidates into our ranking based on the order in which their score appears
ranking = []
for candScore in sortedCandScores:
for cand in reverseCandScoresMap[candScore]:
ranking.append(cand)
return ranking | [
"def",
"getRanking",
"(",
"self",
",",
"profile",
",",
"sampleFileName",
"=",
"None",
")",
":",
"if",
"sampleFileName",
"!=",
"None",
":",
"candScoresMap",
"=",
"self",
".",
"getCandScoresMapFromSamplesFile",
"(",
"profile",
",",
"sampleFileName",
")",
"else",
":",
"candScoresMap",
"=",
"self",
".",
"getCandScoresMap",
"(",
"profile",
")",
"# We generate a map that associates each score with the candidates that have that acore.",
"reverseCandScoresMap",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"candScoresMap",
".",
"items",
"(",
")",
":",
"if",
"value",
"not",
"in",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
":",
"reverseCandScoresMap",
"[",
"value",
"]",
"=",
"[",
"key",
"]",
"else",
":",
"reverseCandScoresMap",
"[",
"value",
"]",
".",
"append",
"(",
"key",
")",
"# We sort the scores by either decreasing order or increasing order.",
"if",
"self",
".",
"maximizeCandScore",
"==",
"True",
":",
"sortedCandScores",
"=",
"sorted",
"(",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"else",
":",
"sortedCandScores",
"=",
"sorted",
"(",
"reverseCandScoresMap",
".",
"keys",
"(",
")",
")",
"# We put the candidates into our ranking based on the order in which their score appears",
"ranking",
"=",
"[",
"]",
"for",
"candScore",
"in",
"sortedCandScores",
":",
"for",
"cand",
"in",
"reverseCandScoresMap",
"[",
"candScore",
"]",
":",
"ranking",
".",
"append",
"(",
"cand",
")",
"return",
"ranking"
] | Returns a list of lists that orders all candidates in tiers from best to worst when we use
MCMC approximation to compute Bayesian utilities for an election profile.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: An optional argument for the name of the input file containing
sample data. If a file name is given, this method will use the samples in the file
instead of generating samples itself. | [
"Returns",
"a",
"list",
"of",
"lists",
"that",
"orders",
"all",
"candidates",
"in",
"tiers",
"from",
"best",
"to",
"worst",
"when",
"we",
"use",
"MCMC",
"approximation",
"to",
"compute",
"Bayesian",
"utilities",
"for",
"an",
"election",
"profile",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L48-L84 | train |
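A hedged usage sketch: MechanismMcmc needs a concrete subclass supplying a sample generator and utility function, so the constructor arguments below are purely illustrative and `profile` is assumed to be a prefpy Profile:

    mcmc = MechanismMcmcMallows(phi=0.8, n1=10, n2=100, burnIn=1000)  # hypothetical signature
    ranking = mcmc.getRanking(profile)                        # fresh MCMC run
    ranking_cached = mcmc.getRanking(profile, 'samples.txt')  # reuse stored samples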
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmc.getCandScoresMap | def getCandScoresMap(self, profile):
"""
Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from our sampling of the profile.
:ivar Profile profile: A Profile object that represents an election profile.
"""
wmg = profile.getWmg(True)
V = self.getInitialSample(wmg)
utilities = dict()
for cand in profile.candMap.keys():
utilities[cand] = 0.0
for i in range(0, self.burnIn):
V = self.sampleGenerator.getNextSample(V)
for i in range(0, self.n2):
for j in range(0, self.n1):
V = self.sampleGenerator.getNextSample(V)
for cand in profile.candMap.keys():
utilities[cand] += self.utilityFunction.getUtility([cand], V)
for cand in profile.candMap.keys():
utilities[cand] = utilities[cand]/self.n2
return utilities | python | def getCandScoresMap(self, profile):
"""
Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from our sampling of the profile.
:ivar Profile profile: A Profile object that represents an election profile.
"""
wmg = profile.getWmg(True)
V = self.getInitialSample(wmg)
utilities = dict()
for cand in profile.candMap.keys():
utilities[cand] = 0.0
for i in range(0, self.burnIn):
V = self.sampleGenerator.getNextSample(V)
for i in range(0, self.n2):
for j in range(0, self.n1):
V = self.sampleGenerator.getNextSample(V)
for cand in profile.candMap.keys():
utilities[cand] += self.utilityFunction.getUtility([cand], V)
for cand in profile.candMap.keys():
utilities[cand] = utilities[cand]/self.n2
return utilities | [
"def",
"getCandScoresMap",
"(",
"self",
",",
"profile",
")",
":",
"wmg",
"=",
"profile",
".",
"getWmg",
"(",
"True",
")",
"V",
"=",
"self",
".",
"getInitialSample",
"(",
"wmg",
")",
"utilities",
"=",
"dict",
"(",
")",
"for",
"cand",
"in",
"profile",
".",
"candMap",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"cand",
"]",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"burnIn",
")",
":",
"V",
"=",
"self",
".",
"sampleGenerator",
".",
"getNextSample",
"(",
"V",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"n2",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"self",
".",
"n1",
")",
":",
"V",
"=",
"self",
".",
"sampleGenerator",
".",
"getNextSample",
"(",
"V",
")",
"for",
"cand",
"in",
"profile",
".",
"candMap",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"cand",
"]",
"+=",
"self",
".",
"utilityFunction",
".",
"getUtility",
"(",
"[",
"cand",
"]",
",",
"V",
")",
"for",
"cand",
"in",
"profile",
".",
"candMap",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"cand",
"]",
"=",
"utilities",
"[",
"cand",
"]",
"/",
"self",
".",
"n2",
"return",
"utilities"
] | Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from our sampling of the profile.
:ivar Profile profile: A Profile object that represents an election profile. | [
"Returns",
"a",
"dictonary",
"that",
"associates",
"the",
"integer",
"representation",
"of",
"each",
"candidate",
"with",
"the",
"Bayesian",
"utilities",
"we",
"approximate",
"from",
"our",
"sampling",
"of",
"the",
"profile",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L86-L113 | train |
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmc.getCandScoresMapFromSamplesFile | def getCandScoresMapFromSamplesFile(self, profile, sampleFileName):
"""
Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from the samples we generated into a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: The name of the input file containing the sample data.
"""
wmg = profile.getWmg(True)
# Initialize our list of expected utilities.
utilities = dict()
for cand in wmg.keys():
utilities[cand] = 0.0
# Open the file and skip the lines of meta data in the file and skip samples for burn-in.
sampleFile = open(sampleFileName)
for i in range(0, SAMPLESFILEMETADATALINECOUNT):
sampleFile.readline()
for i in range(0, self.burnIn):
sampleFile.readline()
# We update our utilities as we read the file.
numSamples = 0
for i in range(0, self.n2*self.n1):
line = sampleFile.readline()
if i % self.n1 != 0: continue
sample = json.loads(line)
for cand in wmg.keys():
utilities[cand] += self.utilityFunction.getUtility([cand], sample)
numSamples += 1
sampleFile.close()
for key in utilities.keys():
utilities[key] = utilities[key]/numSamples
return utilities | python | def getCandScoresMapFromSamplesFile(self, profile, sampleFileName):
"""
Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from the samples we generated into a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: The name of the input file containing the sample data.
"""
wmg = profile.getWmg(True)
# Initialize our list of expected utilities.
utilities = dict()
for cand in wmg.keys():
utilities[cand] = 0.0
# Open the file and skip the lines of meta data in the file and skip samples for burn-in.
sampleFile = open(sampleFileName)
for i in range(0, SAMPLESFILEMETADATALINECOUNT):
sampleFile.readline()
for i in range(0, self.burnIn):
sampleFile.readline()
# We update our utilities as we read the file.
numSamples = 0
for i in range(0, self.n2*self.n1):
line = sampleFile.readline()
if i % self.n1 != 0: continue
sample = json.loads(line)
for cand in wmg.keys():
utilities[cand] += self.utilityFunction.getUtility([cand], sample)
numSamples += 1
sampleFile.close()
for key in utilities.keys():
utilities[key] = utilities[key]/numSamples
return utilities | [
"def",
"getCandScoresMapFromSamplesFile",
"(",
"self",
",",
"profile",
",",
"sampleFileName",
")",
":",
"wmg",
"=",
"profile",
".",
"getWmg",
"(",
"True",
")",
"# Initialize our list of expected utilities.",
"utilities",
"=",
"dict",
"(",
")",
"for",
"cand",
"in",
"wmg",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"cand",
"]",
"=",
"0.0",
"# Open the file and skip the lines of meta data in the file and skip samples for burn-in.",
"sampleFile",
"=",
"open",
"(",
"sampleFileName",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"SAMPLESFILEMETADATALINECOUNT",
")",
":",
"sampleFile",
".",
"readline",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"burnIn",
")",
":",
"sampleFile",
".",
"readline",
"(",
")",
"# We update our utilities as we read the file.",
"numSamples",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"n2",
"*",
"self",
".",
"n1",
")",
":",
"line",
"=",
"sampleFile",
".",
"readline",
"(",
")",
"if",
"i",
"%",
"self",
".",
"n1",
"!=",
"0",
":",
"continue",
"sample",
"=",
"json",
".",
"loads",
"(",
"line",
")",
"for",
"cand",
"in",
"wmg",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"cand",
"]",
"+=",
"self",
".",
"utilityFunction",
".",
"getUtility",
"(",
"[",
"cand",
"]",
",",
"sample",
")",
"numSamples",
"+=",
"1",
"sampleFile",
".",
"close",
"(",
")",
"for",
"key",
"in",
"utilities",
".",
"keys",
"(",
")",
":",
"utilities",
"[",
"key",
"]",
"=",
"utilities",
"[",
"key",
"]",
"/",
"numSamples",
"return",
"utilities"
] | Returns a dictionary that associates the integer representation of each candidate with the
Bayesian utilities we approximate from the samples we generated into a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar str sampleFileName: The name of the input file containing the sample data. | [
"Returns",
"a",
"dictonary",
"that",
"associates",
"the",
"integer",
"representation",
"of",
"each",
"candidate",
"with",
"the",
"Bayesian",
"utilities",
"we",
"approximate",
"from",
"the",
"samples",
"we",
"generated",
"into",
"a",
"file",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L176-L212 | train |
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmc.printMcmcSamplesToFile | def printMcmcSamplesToFile(self, profile, numSamples, outFileName):
"""
Generate samples to a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar int numSamples: The number of samples to be generated.
:ivar str outFileName: The name of the file to be output.
"""
wmg = profile.getWmg(True)
V = self.getInitialSample(wmg)
# Print the number of candidates, phi, and the number of samples.
outFile = open(outFileName, 'w')
outFile.write("m," + str(profile.numCands) + '\n')
outFile.write("phi," + str(self.phi) + '\n')
outFile.write("numSamples," + str(numSamples))
for i in range(0, numSamples):
V = self.sampleGenerator.getNextSample(V)
outFile.write("\n" + json.dumps(V))
outFile.close() | python | def printMcmcSamplesToFile(self, profile, numSamples, outFileName):
"""
Generate samples to a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar int numSamples: The number of samples to be generated.
:ivar str outFileName: The name of the file to be output.
"""
wmg = profile.getWmg(True)
V = self.getInitialSample(wmg)
# Print the number of candidates, phi, and the number of samples.
outFile = open(outFileName, 'w')
outFile.write("m," + str(profile.numCands) + '\n')
outFile.write("phi," + str(self.phi) + '\n')
outFile.write("numSamples," + str(numSamples))
for i in range(0, numSamples):
V = self.sampleGenerator.getNextSample(V)
outFile.write("\n" + json.dumps(V))
outFile.close() | [
"def",
"printMcmcSamplesToFile",
"(",
"self",
",",
"profile",
",",
"numSamples",
",",
"outFileName",
")",
":",
"wmg",
"=",
"profile",
".",
"getWmg",
"(",
"True",
")",
"V",
"=",
"self",
".",
"getInitialSample",
"(",
"wmg",
")",
"# Print the number of candidates, phi, and the number of samples.",
"outFile",
"=",
"open",
"(",
"outFileName",
",",
"'w'",
")",
"outFile",
".",
"write",
"(",
"\"m,\"",
"+",
"str",
"(",
"profile",
".",
"numCands",
")",
"+",
"'\\n'",
")",
"outFile",
".",
"write",
"(",
"\"phi,\"",
"+",
"str",
"(",
"self",
".",
"phi",
")",
"+",
"'\\n'",
")",
"outFile",
".",
"write",
"(",
"\"numSamples,\"",
"+",
"str",
"(",
"numSamples",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"numSamples",
")",
":",
"V",
"=",
"self",
".",
"sampleGenerator",
".",
"getNextSample",
"(",
"V",
")",
"outFile",
".",
"write",
"(",
"\"\\n\"",
"+",
"json",
".",
"dumps",
"(",
"V",
")",
")",
"outFile",
".",
"close",
"(",
")"
] | Generate samples to a file.
:ivar Profile profile: A Profile object that represents an election profile.
:ivar int numSamples: The number of samples to be generated.
:ivar str outFileName: The name of the file to be output. | [
"Generate",
"samples",
"to",
"a",
"file",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L214-L235 | train |
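Together with getCandScoresMapFromSamplesFile above, this supports a write-once, score-many workflow; a sketch with the same hypothetical `mcmc` and `profile` as before:

    mcmc.printMcmcSamplesToFile(profile, 10000, 'samples.txt')
    scores = mcmc.getCandScoresMapFromSamplesFile(profile, 'samples.txt')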
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmcMallows.kendallTau | def kendallTau(self, orderVector, wmgMap):
"""
Given a ranking for a single vote and a wmg for the entire election, calculate the kendall-tau
distance. a.k.a the number of discordant pairs between the wmg for the vote and the wmg for the
election. Currently, we expect the vote to be a strict complete ordering over the candidates.
:ivar list<int> orderVector: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar dict<int,<dict,<int,int>>> wmgMap: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph constructed from an entire election.
"""
discordantPairs = 0.0
for i in itertools.combinations(orderVector, 2):
discordantPairs = discordantPairs + max(0, wmgMap[i[1]][i[0]])
return discordantPairs | python | def kendallTau(self, orderVector, wmgMap):
"""
Given a ranking for a single vote and a wmg for the entire election, calculate the kendall-tau
distance. a.k.a the number of discordant pairs between the wmg for the vote and the wmg for the
election. Currently, we expect the vote to be a strict complete ordering over the candidates.
:ivar list<int> orderVector: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar dict<int,<dict,<int,int>>> wmgMap: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph constructed from an entire election.
"""
discordantPairs = 0.0
for i in itertools.combinations(orderVector, 2):
discordantPairs = discordantPairs + max(0, wmgMap[i[1]][i[0]])
return discordantPairs | [
"def",
"kendallTau",
"(",
"self",
",",
"orderVector",
",",
"wmgMap",
")",
":",
"discordantPairs",
"=",
"0.0",
"for",
"i",
"in",
"itertools",
".",
"combinations",
"(",
"orderVector",
",",
"2",
")",
":",
"discordantPairs",
"=",
"discordantPairs",
"+",
"max",
"(",
"0",
",",
"wmgMap",
"[",
"i",
"[",
"1",
"]",
"]",
"[",
"i",
"[",
"0",
"]",
"]",
")",
"return",
"discordantPairs"
] | Given a ranking for a single vote and a wmg for the entire election, calculate the kendall-tau
distance. a.k.a the number of discordant pairs between the wmg for the vote and the wmg for the
election. Currently, we expect the vote to be a strict complete ordering over the candidates.
:ivar list<int> orderVector: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar dict<int,<dict,<int,int>>> wmgMap: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph constructed from an entire election. | [
"Given",
"a",
"ranking",
"for",
"a",
"single",
"vote",
"and",
"a",
"wmg",
"for",
"the",
"entire",
"election",
"calculate",
"the",
"kendall",
"-",
"tau",
"distance",
".",
"a",
".",
"k",
".",
"a",
"the",
"number",
"of",
"discordant",
"pairs",
"between",
"the",
"wmg",
"for",
"the",
"vote",
"and",
"the",
"wmg",
"for",
"the",
"election",
".",
"Currently",
"we",
"expect",
"the",
"vote",
"to",
"be",
"a",
"strict",
"complete",
"ordering",
"over",
"the",
"candidates",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L253-L270 | train |
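A self-contained worked example of the weighted discordant-pair count computed above, on a toy three-candidate wmg where candidate 2 beats candidate 1 by 3 votes:

    import itertools

    wmg = {1: {2: -3, 3: 1}, 2: {1: 3, 3: 5}, 3: {1: -1, 2: -5}}
    order = [1, 2, 3]  # candidate 1 ranked first
    discordant = sum(max(0, wmg[b][a]) for a, b in itertools.combinations(order, 2))
    assert discordant == 3  # only the (1, 2) pair disagrees with the wmg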
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmcMallows.getInitialSample | def getInitialSample(self, wmg):
"""
Generate an initial sample for the Markov chain. This function will return a list
containing integer representations of each candidate in order of their rank in the current
vote, from first to last. The list will be a complete strict ordering over the candidates.
Initially, we rank the candidates in random order.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
"""
V = copy.deepcopy(wmg.keys())
random.shuffle(V)
return V | python | def getInitialSample(self, wmg):
"""
Generate an initial sample for the Markov chain. This function will return a list
containing integer representations of each candidate in order of their rank in the current
vote, from first to last. The list will be a complete strict ordering over the candidates.
Initially, we rank the candidates in random order.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
"""
V = copy.deepcopy(wmg.keys())
random.shuffle(V)
return V | [
"def",
"getInitialSample",
"(",
"self",
",",
"wmg",
")",
":",
"V",
"=",
"copy",
".",
"deepcopy",
"(",
"wmg",
".",
"keys",
"(",
")",
")",
"random",
".",
"shuffle",
"(",
"V",
")",
"return",
"V"
] | Generate an initial sample for the Markov chain. This function will return a list
containing integer representations of each candidate in order of their rank in the current
vote, from first to last. The list will be a complete strict ordering over the candidates.
Initially, we rank the candidates in random order.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election. | [
"Generate",
"an",
"initial",
"sample",
"for",
"the",
"Markov",
"chain",
".",
"This",
"function",
"will",
"return",
"a",
"list",
"containing",
"integer",
"representations",
"of",
"each",
"candidate",
"in",
"order",
"of",
"their",
"rank",
"in",
"the",
"current",
"vote",
"from",
"first",
"to",
"last",
".",
"The",
"list",
"will",
"be",
"a",
"complete",
"strict",
"ordering",
"over",
"the",
"candidates",
".",
"Initially",
"we",
"rank",
"the",
"candidates",
"in",
"random",
"order",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L272-L287 | train |
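Note that copy.deepcopy(wmg.keys()) is a Python 2 idiom; under Python 3 a keys view cannot be shuffled in place. An equivalent Python 3 sketch:

    import random

    def initial_sample_py3(wmg):
        V = list(wmg)      # candidate ids as a mutable list
        random.shuffle(V)  # in-place uniform shuffle
        return V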
PrefPy/prefpy | prefpy/mechanismMcmc.py | MechanismMcmcCondorcet.getInitialSample | def getInitialSample(self, wmg):
"""
Generate an initial sample for the Markov chain. This function will return a
two-dimensional array of integers, such that for each pair of candidates, cand1 and cand2,
the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
"""
cands = range(len(wmg))
allPairs = itertools.combinations(cands, 2)
V = self.createBinaryRelation(len(cands))
for pair in allPairs:
if wmg[pair[0]+1][pair[1]+1] > 0:
V[pair[0]][pair[1]] = 1
V[pair[1]][pair[0]] = 0
else:
V[pair[0]][pair[1]] = 0
V[pair[1]][pair[0]] = 1
return V | python | def getInitialSample(self, wmg):
"""
Generate an initial sample for the Markov chain. This function will return a
two-dimensional array of integers, such that for each pair of candidates, cand1 and cand2,
the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
"""
cands = range(len(wmg))
allPairs = itertools.combinations(cands, 2)
V = self.createBinaryRelation(len(cands))
for pair in allPairs:
if wmg[pair[0]+1][pair[1]+1] > 0:
V[pair[0]][pair[1]] = 1
V[pair[1]][pair[0]] = 0
else:
V[pair[0]][pair[1]] = 0
V[pair[1]][pair[0]] = 1
return V | [
"def",
"getInitialSample",
"(",
"self",
",",
"wmg",
")",
":",
"cands",
"=",
"range",
"(",
"len",
"(",
"wmg",
")",
")",
"allPairs",
"=",
"itertools",
".",
"combinations",
"(",
"cands",
",",
"2",
")",
"V",
"=",
"self",
".",
"createBinaryRelation",
"(",
"len",
"(",
"cands",
")",
")",
"for",
"pair",
"in",
"allPairs",
":",
"if",
"wmg",
"[",
"pair",
"[",
"0",
"]",
"+",
"1",
"]",
"[",
"pair",
"[",
"1",
"]",
"+",
"1",
"]",
">",
"0",
":",
"V",
"[",
"pair",
"[",
"0",
"]",
"]",
"[",
"pair",
"[",
"1",
"]",
"]",
"=",
"1",
"V",
"[",
"pair",
"[",
"1",
"]",
"]",
"[",
"pair",
"[",
"0",
"]",
"]",
"=",
"0",
"else",
":",
"V",
"[",
"pair",
"[",
"0",
"]",
"]",
"[",
"pair",
"[",
"1",
"]",
"]",
"=",
"0",
"V",
"[",
"pair",
"[",
"1",
"]",
"]",
"[",
"pair",
"[",
"0",
"]",
"]",
"=",
"1",
"return",
"V"
] | Generate an initial sample for the Markov chain. This function will return a
two-dimensional array of integers, such that for each pair of candidates, cand1 and cand2,
the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election. | [
"Generate",
"an",
"initial",
"sample",
"for",
"the",
"Markov",
"chain",
".",
"This",
"function",
"will",
"return",
"a",
"two",
"-",
"dimensional",
"array",
"of",
"integers",
"such",
"that",
"for",
"each",
"pair",
"of",
"candidates",
"cand1",
"and",
"cand2",
"the",
"array",
"contains",
"1",
"if",
"more",
"votes",
"rank",
"cand1",
"above",
"cand2",
"and",
"0",
"otherwise",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmc.py#L341-L363 | train |
tamasgal/km3pipe | pipeinspector/app.py | filter_input | def filter_input(keys, raw):
"""Adds fancy mouse wheel functionality and VI navigation to ListBox"""
if len(keys) == 1:
if keys[0] in UI.keys['up']:
keys[0] = 'up'
elif keys[0] in UI.keys['down']:
keys[0] = 'down'
elif len(keys[0]) == 4 and keys[0][0] == 'mouse press':
if keys[0][1] == 4:
keys[0] = 'up'
elif keys[0][1] == 5:
keys[0] = 'down'
return keys | python | def filter_input(keys, raw):
"""Adds fancy mouse wheel functionality and VI navigation to ListBox"""
if len(keys) == 1:
if keys[0] in UI.keys['up']:
keys[0] = 'up'
elif keys[0] in UI.keys['down']:
keys[0] = 'down'
elif len(keys[0]) == 4 and keys[0][0] == 'mouse press':
if keys[0][1] == 4:
keys[0] = 'up'
elif keys[0][1] == 5:
keys[0] = 'down'
return keys | [
"def",
"filter_input",
"(",
"keys",
",",
"raw",
")",
":",
"if",
"len",
"(",
"keys",
")",
"==",
"1",
":",
"if",
"keys",
"[",
"0",
"]",
"in",
"UI",
".",
"keys",
"[",
"'up'",
"]",
":",
"keys",
"[",
"0",
"]",
"=",
"'up'",
"elif",
"keys",
"[",
"0",
"]",
"in",
"UI",
".",
"keys",
"[",
"'down'",
"]",
":",
"keys",
"[",
"0",
"]",
"=",
"'down'",
"elif",
"len",
"(",
"keys",
"[",
"0",
"]",
")",
"==",
"4",
"and",
"keys",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'mouse press'",
":",
"if",
"keys",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"4",
":",
"keys",
"[",
"0",
"]",
"=",
"'up'",
"elif",
"keys",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"5",
":",
"keys",
"[",
"0",
"]",
"=",
"'down'",
"return",
"keys"
] | Adds fancy mouse wheel functionality and VI navigation to ListBox | [
"Adds",
"fancy",
"mouse",
"wheel",
"functionality",
"and",
"VI",
"navigation",
"to",
"ListBox"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/pipeinspector/app.py#L39-L51 | train |
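A hedged wiring sketch: urwid's MainLoop accepts an input_filter callable, which is presumably how pipeinspector installs this function:

    import urwid

    listbox = urwid.ListBox(urwid.SimpleListWalker([urwid.Text('item')]))
    loop = urwid.MainLoop(listbox, input_filter=filter_input)
    # loop.run()  # not executed here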
lexibank/pylexibank | src/pylexibank/lingpy_util.py | wordlist2cognates | def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source) | python | def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
"""Turn a wordlist into a cognate set list, using the cldf parameters."""
for k in wordlist:
yield dict(
Form_ID=wordlist[k, 'lid'],
ID=k,
Form=wordlist[k, 'ipa'],
Cognateset_ID='{0}-{1}'.format(
slug(wordlist[k, 'concept']), wordlist[k, ref]),
Cognate_Detection_Method=expert,
Source=source) | [
"def",
"wordlist2cognates",
"(",
"wordlist",
",",
"source",
",",
"expert",
"=",
"'expert'",
",",
"ref",
"=",
"'cogid'",
")",
":",
"for",
"k",
"in",
"wordlist",
":",
"yield",
"dict",
"(",
"Form_ID",
"=",
"wordlist",
"[",
"k",
",",
"'lid'",
"]",
",",
"ID",
"=",
"k",
",",
"Form",
"=",
"wordlist",
"[",
"k",
",",
"'ipa'",
"]",
",",
"Cognateset_ID",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"slug",
"(",
"wordlist",
"[",
"k",
",",
"'concept'",
"]",
")",
",",
"wordlist",
"[",
"k",
",",
"ref",
"]",
")",
",",
"Cognate_Detection_Method",
"=",
"expert",
",",
"Source",
"=",
"source",
")"
] | Turn a wordlist into a cognate set list, using the cldf parameters. | [
"Turn",
"a",
"wordlist",
"into",
"a",
"cognate",
"set",
"list",
"using",
"the",
"cldf",
"parameters",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L9-L19 | train |
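A hedged usage sketch; `wl` stands for a lingpy Wordlist carrying the 'lid', 'ipa', 'concept' and 'cogid' entries the generator expects, and the source key is made up:

    cognates = list(wordlist2cognates(wl, source='Author2020'))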
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2wld | def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D | python | def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D | [
"def",
"_cldf2wld",
"(",
"dataset",
")",
":",
"header",
"=",
"[",
"f",
"for",
"f",
"in",
"dataset",
".",
"dataset",
".",
"lexeme_class",
".",
"fieldnames",
"(",
")",
"if",
"f",
"!=",
"'ID'",
"]",
"D",
"=",
"{",
"0",
":",
"[",
"'lid'",
"]",
"+",
"[",
"h",
".",
"lower",
"(",
")",
"for",
"h",
"in",
"header",
"]",
"}",
"for",
"idx",
",",
"row",
"in",
"enumerate",
"(",
"dataset",
".",
"objects",
"[",
"'FormTable'",
"]",
")",
":",
"row",
"=",
"deepcopy",
"(",
"row",
")",
"row",
"[",
"'Segments'",
"]",
"=",
"' '",
".",
"join",
"(",
"row",
"[",
"'Segments'",
"]",
")",
"D",
"[",
"idx",
"+",
"1",
"]",
"=",
"[",
"row",
"[",
"'ID'",
"]",
"]",
"+",
"[",
"row",
"[",
"h",
"]",
"for",
"h",
"in",
"header",
"]",
"return",
"D"
] | Make lingpy-compatible dictionary out of cldf main data. | [
"Make",
"lingpy",
"-",
"compatible",
"dictinary",
"out",
"of",
"cldf",
"main",
"data",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L22-L30 | train |
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2lexstat | def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col) | python | def _cldf2lexstat(
dataset,
segments='segments',
transcription='value',
row='parameter_id',
col='language_id'):
"""Read LexStat object from cldf dataset."""
D = _cldf2wld(dataset)
return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col) | [
"def",
"_cldf2lexstat",
"(",
"dataset",
",",
"segments",
"=",
"'segments'",
",",
"transcription",
"=",
"'value'",
",",
"row",
"=",
"'parameter_id'",
",",
"col",
"=",
"'language_id'",
")",
":",
"D",
"=",
"_cldf2wld",
"(",
"dataset",
")",
"return",
"lingpy",
".",
"LexStat",
"(",
"D",
",",
"segments",
"=",
"segments",
",",
"transcription",
"=",
"transcription",
",",
"row",
"=",
"row",
",",
"col",
"=",
"col",
")"
] | Read LexStat object from cldf dataset. | [
"Read",
"LexStat",
"object",
"from",
"cldf",
"dataset",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L33-L41 | train |
lexibank/pylexibank | src/pylexibank/lingpy_util.py | _cldf2wordlist | def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col) | python | def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
"""Read worldist object from cldf dataset."""
return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col) | [
"def",
"_cldf2wordlist",
"(",
"dataset",
",",
"row",
"=",
"'parameter_id'",
",",
"col",
"=",
"'language_id'",
")",
":",
"return",
"lingpy",
".",
"Wordlist",
"(",
"_cldf2wld",
"(",
"dataset",
")",
",",
"row",
"=",
"row",
",",
"col",
"=",
"col",
")"
] | Read wordlist object from cldf dataset. | [
"Read",
"worldist",
"object",
"from",
"cldf",
"dataset",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L44-L46 | train |
lexibank/pylexibank | src/pylexibank/lingpy_util.py | iter_cognates | def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold)) | python | def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
"""
Compute cognates automatically for a given dataset.
"""
if method == 'turchin':
for row in dataset.objects['FormTable']:
sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
if sounds.startswith('V'):
sounds = 'H' + sounds
sounds = '-'.join([s for s in sounds if s != 'V'][:2])
cogid = slug(row['Parameter_ID']) + '-' + sounds
if '0' not in sounds:
yield dict(
Form_ID=row['ID'],
Form=row['Value'],
Cognateset_ID=cogid,
Cognate_Detection_Method='CMM')
if method in ['sca', 'lexstat']:
lex = _cldf2lexstat(dataset)
if method == 'lexstat':
lex.get_scorer(**kw)
lex.cluster(method=method, threshold=threshold, ref='cogid')
for k in lex:
yield Cognate(
Form_ID=lex[k, 'lid'],
Form=lex[k, 'value'],
Cognateset_ID=lex[k, 'cogid'],
Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold)) | [
"def",
"iter_cognates",
"(",
"dataset",
",",
"column",
"=",
"'Segments'",
",",
"method",
"=",
"'turchin'",
",",
"threshold",
"=",
"0.5",
",",
"*",
"*",
"kw",
")",
":",
"if",
"method",
"==",
"'turchin'",
":",
"for",
"row",
"in",
"dataset",
".",
"objects",
"[",
"'FormTable'",
"]",
":",
"sounds",
"=",
"''",
".",
"join",
"(",
"lingpy",
".",
"tokens2class",
"(",
"row",
"[",
"column",
"]",
",",
"'dolgo'",
")",
")",
"if",
"sounds",
".",
"startswith",
"(",
"'V'",
")",
":",
"sounds",
"=",
"'H'",
"+",
"sounds",
"sounds",
"=",
"'-'",
".",
"join",
"(",
"[",
"s",
"for",
"s",
"in",
"sounds",
"if",
"s",
"!=",
"'V'",
"]",
"[",
":",
"2",
"]",
")",
"cogid",
"=",
"slug",
"(",
"row",
"[",
"'Parameter_ID'",
"]",
")",
"+",
"'-'",
"+",
"sounds",
"if",
"'0'",
"not",
"in",
"sounds",
":",
"yield",
"dict",
"(",
"Form_ID",
"=",
"row",
"[",
"'ID'",
"]",
",",
"Form",
"=",
"row",
"[",
"'Value'",
"]",
",",
"Cognateset_ID",
"=",
"cogid",
",",
"Cognate_Detection_Method",
"=",
"'CMM'",
")",
"if",
"method",
"in",
"[",
"'sca'",
",",
"'lexstat'",
"]",
":",
"lex",
"=",
"_cldf2lexstat",
"(",
"dataset",
")",
"if",
"method",
"==",
"'lexstat'",
":",
"lex",
".",
"get_scorer",
"(",
"*",
"*",
"kw",
")",
"lex",
".",
"cluster",
"(",
"method",
"=",
"method",
",",
"threshold",
"=",
"threshold",
",",
"ref",
"=",
"'cogid'",
")",
"for",
"k",
"in",
"lex",
":",
"yield",
"Cognate",
"(",
"Form_ID",
"=",
"lex",
"[",
"k",
",",
"'lid'",
"]",
",",
"Form",
"=",
"lex",
"[",
"k",
",",
"'value'",
"]",
",",
"Cognateset_ID",
"=",
"lex",
"[",
"k",
",",
"'cogid'",
"]",
",",
"Cognate_Detection_Method",
"=",
"method",
"+",
"'-t{0:.2f}'",
".",
"format",
"(",
"threshold",
")",
")"
] | Compute cognates automatically for a given dataset. | [
"Compute",
"cognates",
"automatically",
"for",
"a",
"given",
"dataset",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L49-L77 | train |
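A hedged usage sketch; `ds` stands for a pylexibank dataset wrapper exposing objects['FormTable'] as assumed by the function, and the threshold is illustrative:

    turchin = list(iter_cognates(ds, method='turchin'))
    sca = list(iter_cognates(ds, method='sca', threshold=0.45))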
lexibank/pylexibank | src/pylexibank/lingpy_util.py | iter_alignments | def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method | python | def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
"""
Function computes automatic alignments and writes them to file.
"""
if not isinstance(dataset, lingpy.basic.parser.QLCParser):
wordlist = _cldf2wordlist(dataset)
cognates = {r['Form_ID']: r for r in cognate_sets}
wordlist.add_entries(
'cogid',
'lid',
lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
alm = lingpy.Alignments(
wordlist,
ref='cogid',
row='parameter_id',
col='language_id',
segments=column.lower())
alm.align(method=method)
for k in alm:
if alm[k, 'lid'] in cognates:
cognate = cognates[alm[k, 'lid']]
cognate['Alignment'] = alm[k, 'alignment']
cognate['Alignment_Method'] = method
else:
alm = lingpy.Alignments(dataset, ref='cogid')
alm.align(method=method)
for cognate in cognate_sets:
idx = cognate['ID'] or cognate['Form_ID']
cognate['Alignment'] = alm[int(idx), 'alignment']
cognate['Alignment_Method'] = 'SCA-' + method | [
"def",
"iter_alignments",
"(",
"dataset",
",",
"cognate_sets",
",",
"column",
"=",
"'Segments'",
",",
"method",
"=",
"'library'",
")",
":",
"if",
"not",
"isinstance",
"(",
"dataset",
",",
"lingpy",
".",
"basic",
".",
"parser",
".",
"QLCParser",
")",
":",
"wordlist",
"=",
"_cldf2wordlist",
"(",
"dataset",
")",
"cognates",
"=",
"{",
"r",
"[",
"'Form_ID'",
"]",
":",
"r",
"for",
"r",
"in",
"cognate_sets",
"}",
"wordlist",
".",
"add_entries",
"(",
"'cogid'",
",",
"'lid'",
",",
"lambda",
"x",
":",
"cognates",
"[",
"x",
"]",
"[",
"'Cognateset_ID'",
"]",
"if",
"x",
"in",
"cognates",
"else",
"0",
")",
"alm",
"=",
"lingpy",
".",
"Alignments",
"(",
"wordlist",
",",
"ref",
"=",
"'cogid'",
",",
"row",
"=",
"'parameter_id'",
",",
"col",
"=",
"'language_id'",
",",
"segments",
"=",
"column",
".",
"lower",
"(",
")",
")",
"alm",
".",
"align",
"(",
"method",
"=",
"method",
")",
"for",
"k",
"in",
"alm",
":",
"if",
"alm",
"[",
"k",
",",
"'lid'",
"]",
"in",
"cognates",
":",
"cognate",
"=",
"cognates",
"[",
"alm",
"[",
"k",
",",
"'lid'",
"]",
"]",
"cognate",
"[",
"'Alignment'",
"]",
"=",
"alm",
"[",
"k",
",",
"'alignment'",
"]",
"cognate",
"[",
"'Alignment_Method'",
"]",
"=",
"method",
"else",
":",
"alm",
"=",
"lingpy",
".",
"Alignments",
"(",
"dataset",
",",
"ref",
"=",
"'cogid'",
")",
"alm",
".",
"align",
"(",
"method",
"=",
"method",
")",
"for",
"cognate",
"in",
"cognate_sets",
":",
"idx",
"=",
"cognate",
"[",
"'ID'",
"]",
"or",
"cognate",
"[",
"'Form_ID'",
"]",
"cognate",
"[",
"'Alignment'",
"]",
"=",
"alm",
"[",
"int",
"(",
"idx",
")",
",",
"'alignment'",
"]",
"cognate",
"[",
"'Alignment_Method'",
"]",
"=",
"'SCA-'",
"+",
"method"
] | Function computes automatic alignments and writes them to file. | [
"Function",
"computes",
"automatic",
"alignments",
"and",
"writes",
"them",
"to",
"file",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/lingpy_util.py#L80-L110 | train |
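A sketch combining the two helpers (same hypothetical `ds` as above): detect cognates first, then annotate them in place with alignments:

    cogs = list(iter_cognates(ds, method='turchin'))
    iter_alignments(ds, cogs, method='library')  # adds 'Alignment' keys to each cognate dict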
tamasgal/km3pipe | km3pipe/utils/tohdf5.py | tohdf5 | def tohdf5(input_files, output_file, n_events, conv_times_to_jte, **kwargs):
"""Convert Any file to HDF5 file"""
if len(input_files) > 1:
cprint(
"Preparing to convert {} files to HDF5.".format(len(input_files))
)
from km3pipe import Pipeline # noqa
from km3pipe.io import GenericPump, HDF5Sink, HDF5MetaData # noqa
for input_file in input_files:
cprint("Converting '{}'...".format(input_file))
if len(input_files) > 1:
output_file = input_file + '.h5'
meta_data = kwargs.copy()
meta_data['origin'] = input_file
pipe = Pipeline()
pipe.attach(HDF5MetaData, data=meta_data)
pipe.attach(GenericPump, filenames=input_file, **kwargs)
pipe.attach(StatusBar, every=250)
if conv_times_to_jte:
from km3modules.mc import MCTimeCorrector
pipe.attach(MCTimeCorrector)
pipe.attach(HDF5Sink, filename=output_file, **kwargs)
pipe.drain(n_events)
cprint("File '{}' was converted.".format(input_file)) | python | def tohdf5(input_files, output_file, n_events, conv_times_to_jte, **kwargs):
"""Convert Any file to HDF5 file"""
if len(input_files) > 1:
cprint(
"Preparing to convert {} files to HDF5.".format(len(input_files))
)
from km3pipe import Pipeline # noqa
from km3pipe.io import GenericPump, HDF5Sink, HDF5MetaData # noqa
for input_file in input_files:
cprint("Converting '{}'...".format(input_file))
if len(input_files) > 1:
output_file = input_file + '.h5'
meta_data = kwargs.copy()
meta_data['origin'] = input_file
pipe = Pipeline()
pipe.attach(HDF5MetaData, data=meta_data)
pipe.attach(GenericPump, filenames=input_file, **kwargs)
pipe.attach(StatusBar, every=250)
if conv_times_to_jte:
from km3modules.mc import MCTimeCorrector
pipe.attach(MCTimeCorrector)
pipe.attach(HDF5Sink, filename=output_file, **kwargs)
pipe.drain(n_events)
cprint("File '{}' was converted.".format(input_file)) | [
"def",
"tohdf5",
"(",
"input_files",
",",
"output_file",
",",
"n_events",
",",
"conv_times_to_jte",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"input_files",
")",
">",
"1",
":",
"cprint",
"(",
"\"Preparing to convert {} files to HDF5.\"",
".",
"format",
"(",
"len",
"(",
"input_files",
")",
")",
")",
"from",
"km3pipe",
"import",
"Pipeline",
"# noqa",
"from",
"km3pipe",
".",
"io",
"import",
"GenericPump",
",",
"HDF5Sink",
",",
"HDF5MetaData",
"# noqa",
"for",
"input_file",
"in",
"input_files",
":",
"cprint",
"(",
"\"Converting '{}'...\"",
".",
"format",
"(",
"input_file",
")",
")",
"if",
"len",
"(",
"input_files",
")",
">",
"1",
":",
"output_file",
"=",
"input_file",
"+",
"'.h5'",
"meta_data",
"=",
"kwargs",
".",
"copy",
"(",
")",
"meta_data",
"[",
"'origin'",
"]",
"=",
"input_file",
"pipe",
"=",
"Pipeline",
"(",
")",
"pipe",
".",
"attach",
"(",
"HDF5MetaData",
",",
"data",
"=",
"meta_data",
")",
"pipe",
".",
"attach",
"(",
"GenericPump",
",",
"filenames",
"=",
"input_file",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"attach",
"(",
"StatusBar",
",",
"every",
"=",
"250",
")",
"if",
"conv_times_to_jte",
":",
"from",
"km3modules",
".",
"mc",
"import",
"MCTimeCorrector",
"pipe",
".",
"attach",
"(",
"MCTimeCorrector",
")",
"pipe",
".",
"attach",
"(",
"HDF5Sink",
",",
"filename",
"=",
"output_file",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"drain",
"(",
"n_events",
")",
"cprint",
"(",
"\"File '{}' was converted.\"",
".",
"format",
"(",
"input_file",
")",
")"
] | Convert any file to HDF5 file | [
"Convert",
"Any",
"file",
"to",
"HDF5",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/tohdf5.py#L46-L73 | train |
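A hedged direct-call sketch (km3pipe also exposes this as a command-line utility; the filename and the assumption that n_events=None drains everything are illustrative):

    tohdf5(['run_00001.root'], 'run_00001.h5', n_events=None, conv_times_to_jte=False)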
IRC-SPHERE/HyperStream | hyperstream/channels/channel_manager.py | ChannelManager.update_channels | def update_channels(self):
"""
Pulls out all of the stream definitions from the database, and populates the channels with stream references
"""
logging.info("Updating channels")
with switch_db(StreamDefinitionModel, 'hyperstream'):
for s in StreamDefinitionModel.objects():
try:
stream_id = StreamId(name=s.stream_id.name, meta_data=s.stream_id.meta_data)
except AttributeError as e:
raise e
logging.debug("Processing {}".format(stream_id))
try:
# This can fail if a plugin has been defined by a different instantiation of HyperStream on the same
# database.
channel = self.get_channel(s.channel_id)
except ChannelNotFoundError as e:
logging.warn(e)
continue
# calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals))
last_accessed = utcnow()
last_updated = s.last_updated if s.last_updated else utcnow()
if stream_id in channel.streams:
if isinstance(channel, (AssetsChannel, AssetsFileChannel)):
continue
raise StreamAlreadyExistsError(stream_id)
from . import MemoryChannel, DatabaseChannel
if isinstance(channel, MemoryChannel):
channel.create_stream(stream_id)
elif isinstance(channel, DatabaseChannel):
if channel == self.assets:
stream_type = AssetStream
else:
stream_type = DatabaseStream
channel.streams[stream_id] = stream_type(
channel=channel,
stream_id=stream_id,
calculated_intervals=None, # Not required since it's initialised from mongo_model in __init__
last_accessed=last_accessed,
last_updated=last_updated,
sandbox=s.sandbox,
mongo_model=s
)
else:
logging.warn("Unable to parse stream {}".format(stream_id)) | python | def update_channels(self):
"""
Pulls out all of the stream definitions from the database, and populates the channels with stream references
"""
logging.info("Updating channels")
with switch_db(StreamDefinitionModel, 'hyperstream'):
for s in StreamDefinitionModel.objects():
try:
stream_id = StreamId(name=s.stream_id.name, meta_data=s.stream_id.meta_data)
except AttributeError as e:
raise e
logging.debug("Processing {}".format(stream_id))
try:
# This can fail if a plugin has been defined by a different instantiation of HyperStream on the same
# database.
channel = self.get_channel(s.channel_id)
except ChannelNotFoundError as e:
logging.warn(e)
continue
# calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals))
last_accessed = utcnow()
last_updated = s.last_updated if s.last_updated else utcnow()
if stream_id in channel.streams:
if isinstance(channel, (AssetsChannel, AssetsFileChannel)):
continue
raise StreamAlreadyExistsError(stream_id)
from . import MemoryChannel, DatabaseChannel
if isinstance(channel, MemoryChannel):
channel.create_stream(stream_id)
elif isinstance(channel, DatabaseChannel):
if channel == self.assets:
stream_type = AssetStream
else:
stream_type = DatabaseStream
channel.streams[stream_id] = stream_type(
channel=channel,
stream_id=stream_id,
calculated_intervals=None, # Not required since it's initialised from mongo_model in __init__
last_accessed=last_accessed,
last_updated=last_updated,
sandbox=s.sandbox,
mongo_model=s
)
else:
logging.warn("Unable to parse stream {}".format(stream_id)) | [
"def",
"update_channels",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"\"Updating channels\"",
")",
"with",
"switch_db",
"(",
"StreamDefinitionModel",
",",
"'hyperstream'",
")",
":",
"for",
"s",
"in",
"StreamDefinitionModel",
".",
"objects",
"(",
")",
":",
"try",
":",
"stream_id",
"=",
"StreamId",
"(",
"name",
"=",
"s",
".",
"stream_id",
".",
"name",
",",
"meta_data",
"=",
"s",
".",
"stream_id",
".",
"meta_data",
")",
"except",
"AttributeError",
"as",
"e",
":",
"raise",
"e",
"logging",
".",
"debug",
"(",
"\"Processing {}\"",
".",
"format",
"(",
"stream_id",
")",
")",
"try",
":",
"# This can fail if a plugin has been defined by a different instantiation of HyperStream on the same",
"# database.",
"channel",
"=",
"self",
".",
"get_channel",
"(",
"s",
".",
"channel_id",
")",
"except",
"ChannelNotFoundError",
"as",
"e",
":",
"logging",
".",
"warn",
"(",
"e",
")",
"continue",
"# calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals))",
"last_accessed",
"=",
"utcnow",
"(",
")",
"last_updated",
"=",
"s",
".",
"last_updated",
"if",
"s",
".",
"last_updated",
"else",
"utcnow",
"(",
")",
"if",
"stream_id",
"in",
"channel",
".",
"streams",
":",
"if",
"isinstance",
"(",
"channel",
",",
"(",
"AssetsChannel",
",",
"AssetsFileChannel",
")",
")",
":",
"continue",
"raise",
"StreamAlreadyExistsError",
"(",
"stream_id",
")",
"from",
".",
"import",
"MemoryChannel",
",",
"DatabaseChannel",
"if",
"isinstance",
"(",
"channel",
",",
"MemoryChannel",
")",
":",
"channel",
".",
"create_stream",
"(",
"stream_id",
")",
"elif",
"isinstance",
"(",
"channel",
",",
"DatabaseChannel",
")",
":",
"if",
"channel",
"==",
"self",
".",
"assets",
":",
"stream_type",
"=",
"AssetStream",
"else",
":",
"stream_type",
"=",
"DatabaseStream",
"channel",
".",
"streams",
"[",
"stream_id",
"]",
"=",
"stream_type",
"(",
"channel",
"=",
"channel",
",",
"stream_id",
"=",
"stream_id",
",",
"calculated_intervals",
"=",
"None",
",",
"# Not required since it's initialised from mongo_model in __init__",
"last_accessed",
"=",
"last_accessed",
",",
"last_updated",
"=",
"last_updated",
",",
"sandbox",
"=",
"s",
".",
"sandbox",
",",
"mongo_model",
"=",
"s",
")",
"else",
":",
"logging",
".",
"warn",
"(",
"\"Unable to parse stream {}\"",
".",
"format",
"(",
"stream_id",
")",
")"
] | Pulls out all of the stream definitions from the database, and populates the channels with stream references | [
"Pulls",
"out",
"all",
"of",
"the",
"stream",
"definitions",
"from",
"the",
"database",
"and",
"populates",
"the",
"channels",
"with",
"stream",
"references"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/channel_manager.py#L97-L146 | train |
IRC-SPHERE/HyperStream | hyperstream/channels/channel_manager.py | ChannelManager.get_tool_class | def get_tool_class(self, tool):
"""
Gets the actual class which can then be instantiated with its parameters
:param tool: The tool name or id
:type tool: str | unicode | StreamId
:rtype: Tool | MultiOutputTool
:return: The tool class
"""
if isinstance(tool, string_types):
tool_id = StreamId(tool)
elif isinstance(tool, StreamId):
tool_id = tool
else:
raise TypeError(tool)
tool_stream_view = None
# Look in the main tool channel first
if tool_id in self.tools:
tool_stream_view = self.tools[tool_id].window((MIN_DATE, self.tools.up_to_timestamp))
else:
# Otherwise look through all the channels in the order they were defined
for tool_channel in self.tool_channels:
if tool_channel == self.tools:
continue
if tool_id in tool_channel:
# noinspection PyTypeChecker
tool_stream_view = tool_channel[tool_id].window((MIN_DATE, tool_channel.up_to_timestamp))
if tool_stream_view is None:
raise ToolNotFoundError(tool)
# TODO: Use tool versions - here we just take the latest one
last = tool_stream_view.last()
if last is None:
raise ToolNotFoundError(tool)
return tool_stream_view.last().value | python | def get_tool_class(self, tool):
"""
Gets the actual class which can then be instantiated with its parameters
:param tool: The tool name or id
:type tool: str | unicode | StreamId
:rtype: Tool | MultiOutputTool
:return: The tool class
"""
if isinstance(tool, string_types):
tool_id = StreamId(tool)
elif isinstance(tool, StreamId):
tool_id = tool
else:
raise TypeError(tool)
tool_stream_view = None
# Look in the main tool channel first
if tool_id in self.tools:
tool_stream_view = self.tools[tool_id].window((MIN_DATE, self.tools.up_to_timestamp))
else:
# Otherwise look through all the channels in the order they were defined
for tool_channel in self.tool_channels:
if tool_channel == self.tools:
continue
if tool_id in tool_channel:
# noinspection PyTypeChecker
tool_stream_view = tool_channel[tool_id].window((MIN_DATE, tool_channel.up_to_timestamp))
if tool_stream_view is None:
raise ToolNotFoundError(tool)
# TODO: Use tool versions - here we just take the latest one
last = tool_stream_view.last()
if last is None:
raise ToolNotFoundError(tool)
return tool_stream_view.last().value | [
"def",
"get_tool_class",
"(",
"self",
",",
"tool",
")",
":",
"if",
"isinstance",
"(",
"tool",
",",
"string_types",
")",
":",
"tool_id",
"=",
"StreamId",
"(",
"tool",
")",
"elif",
"isinstance",
"(",
"tool",
",",
"StreamId",
")",
":",
"tool_id",
"=",
"tool",
"else",
":",
"raise",
"TypeError",
"(",
"tool",
")",
"tool_stream_view",
"=",
"None",
"# Look in the main tool channel first",
"if",
"tool_id",
"in",
"self",
".",
"tools",
":",
"tool_stream_view",
"=",
"self",
".",
"tools",
"[",
"tool_id",
"]",
".",
"window",
"(",
"(",
"MIN_DATE",
",",
"self",
".",
"tools",
".",
"up_to_timestamp",
")",
")",
"else",
":",
"# Otherwise look through all the channels in the order they were defined",
"for",
"tool_channel",
"in",
"self",
".",
"tool_channels",
":",
"if",
"tool_channel",
"==",
"self",
".",
"tools",
":",
"continue",
"if",
"tool_id",
"in",
"tool_channel",
":",
"# noinspection PyTypeChecker",
"tool_stream_view",
"=",
"tool_channel",
"[",
"tool_id",
"]",
".",
"window",
"(",
"(",
"MIN_DATE",
",",
"tool_channel",
".",
"up_to_timestamp",
")",
")",
"if",
"tool_stream_view",
"is",
"None",
":",
"raise",
"ToolNotFoundError",
"(",
"tool",
")",
"# TODO: Use tool versions - here we just take the latest one",
"last",
"=",
"tool_stream_view",
".",
"last",
"(",
")",
"if",
"last",
"is",
"None",
":",
"raise",
"ToolNotFoundError",
"(",
"tool",
")",
"return",
"tool_stream_view",
".",
"last",
"(",
")",
".",
"value"
] | Gets the actual class which can then be instantiated with its parameters
:param tool: The tool name or id
:type tool: str | unicode | StreamId
:rtype: Tool | MultiOutputTool
:return: The tool class | [
"Gets",
"the",
"actual",
"class",
"which",
"can",
"then",
"be",
"instantiated",
"with",
"its",
"parameters"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/channel_manager.py#L148-L186 | train |
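A hedged lookup sketch for the method above; channel_manager is assumed to be an existing ChannelManager instance and "clock" is an invented tool name, not one taken from the record:

# Hypothetical: resolves the latest stored tool class from the tool channels.
tool_cls = channel_manager.get_tool_class("clock")
tool = tool_cls()  # instantiate with the tool's own parameters as needed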
IRC-SPHERE/HyperStream | hyperstream/plate/plate.py | Plate.is_sub_plate | def is_sub_plate(self, other):
"""
Determines if this plate is a sub-plate of another plate -
i.e. has the same meta data but a restricted set of values
:param other: The other plate
:return: True if this plate is a sub-plate of the other plate
"""
if all(v in set(other.values) for v in self.values):
return True
if all(any(all(spv in m for spv in v) for m in map(set, other.values)) for v in self.values):
return True
if other in self.ancestor_plates: # added by MK, but still not sure whether all cases are covered
return True
return False | python | def is_sub_plate(self, other):
"""
Determines if this plate is a sub-plate of another plate -
i.e. has the same meta data but a restricted set of values
:param other: The other plate
:return: True if this plate is a sub-plate of the other plate
"""
if all(v in set(other.values) for v in self.values):
return True
if all(any(all(spv in m for spv in v) for m in map(set, other.values)) for v in self.values):
return True
if other in self.ancestor_plates: # added by MK, but still not sure whether all cases are covered
return True
return False | [
"def",
"is_sub_plate",
"(",
"self",
",",
"other",
")",
":",
"if",
"all",
"(",
"v",
"in",
"set",
"(",
"other",
".",
"values",
")",
"for",
"v",
"in",
"self",
".",
"values",
")",
":",
"return",
"True",
"if",
"all",
"(",
"any",
"(",
"all",
"(",
"spv",
"in",
"m",
"for",
"spv",
"in",
"v",
")",
"for",
"m",
"in",
"map",
"(",
"set",
",",
"other",
".",
"values",
")",
")",
"for",
"v",
"in",
"self",
".",
"values",
")",
":",
"return",
"True",
"if",
"other",
"in",
"self",
".",
"ancestor_plates",
":",
"# added by MK, but still not sure whether all cases are covered",
"return",
"True",
"return",
"False"
] | Determines if this plate is a sub-plate of another plate -
i.e. has the same meta data but a restricted set of values
:param other: The other plate
:return: True if this plate is a sub-plate of the other plate | [
"Determines",
"if",
"this",
"plate",
"is",
"a",
"sub",
"-",
"plate",
"of",
"another",
"plate",
"-",
"i",
".",
"e",
".",
"has",
"the",
"same",
"meta",
"data",
"but",
"a",
"restricted",
"set",
"of",
"values"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/plate/plate.py#L130-L144 | train |
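The first containment check above can be traced with plain tuples; the values below are toy data and do not involve the real Plate class:

# Toy data: every value of the candidate sub-plate occurs in the other plate.
values_a = [("house.1",), ("house.2",)]
values_b = [("house.1",), ("house.2",), ("house.3",)]
print(all(v in set(values_b) for v in values_a))  # True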
thebigmunch/google-music-utils | src/google_music_utils/utils.py | normalize_value | def normalize_value(value):
"""Normalize metadata value to improve match accuracy."""
value = str(value)
value = value.casefold()
value = re.sub(r'\/\s*\d+', '', value) # Remove "/<totaltracks>" from track number.
value = re.sub(r'^0+([0-9]+)', r'\1', value) # Remove leading zero(s) from track number.
value = re.sub(r'^(\d+)\.+', r'\1', value) # Remove dots from track number.
value = re.sub(r'[^\w\s]', '', value) # Remove non-word characters.
value = re.sub(r'^the\s+', '', value) # Remove leading "the".
value = re.sub(r'^\s+', '', value) # Remove leading spaces.
value = re.sub(r'\s+$', '', value) # Remove trailing spaces.
value = re.sub(r'\s+', ' ', value) # Reduce multiple spaces to a single space.
return value | python | def normalize_value(value):
"""Normalize metadata value to improve match accuracy."""
value = str(value)
value = value.casefold()
value = re.sub(r'\/\s*\d+', '', value) # Remove "/<totaltracks>" from track number.
value = re.sub(r'^0+([0-9]+)', r'\1', value) # Remove leading zero(s) from track number.
value = re.sub(r'^(\d+)\.+', r'\1', value) # Remove dots from track number.
value = re.sub(r'[^\w\s]', '', value) # Remove non-word characters.
value = re.sub(r'^the\s+', '', value) # Remove leading "the".
value = re.sub(r'^\s+', '', value) # Remove leading spaces.
value = re.sub(r'\s+$', '', value) # Remove trailing spaces.
value = re.sub(r'\s+', ' ', value) # Reduce multiple spaces to a single space.
return value | [
"def",
"normalize_value",
"(",
"value",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"value",
"=",
"value",
".",
"casefold",
"(",
")",
"value",
"=",
"re",
".",
"sub",
"(",
"r'\\/\\s*\\d+'",
",",
"''",
",",
"value",
")",
"# Remove \"/<totaltracks>\" from track number.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'^0+([0-9]+)'",
",",
"r'\\1'",
",",
"value",
")",
"# Remove leading zero(s) from track number.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'^(\\d+)\\.+'",
",",
"r'\\1'",
",",
"value",
")",
"# Remove dots from track number.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'[^\\w\\s]'",
",",
"''",
",",
"value",
")",
"# Remove leading non-word characters.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'^the\\s+'",
",",
"''",
",",
"value",
")",
"# Remove leading \"the\".",
"value",
"=",
"re",
".",
"sub",
"(",
"r'^\\s+'",
",",
"''",
",",
"value",
")",
"# Remove leading spaces.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'\\s+$'",
",",
"''",
",",
"value",
")",
"# Remove trailing spaces.",
"value",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"value",
")",
"# Reduce multiple spaces to a single space.",
"return",
"value"
] | Normalize metadata value to improve match accuracy. | [
"Normalize",
"metadata",
"value",
"to",
"improve",
"match",
"accuracy",
"."
] | 2e8873defe7d5aab7321b9d5ec8a80d72687578e | https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/utils.py#L53-L68 | train |
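Two worked examples, traced directly from the substitution chain above (doctest style, as used by other docstrings in this dataset):

>>> normalize_value("The Beatles")  # casefold, then the leading "the" is dropped
'beatles'
>>> normalize_value("01/12")  # "/<totaltracks>" and leading zeros are removed
'1'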
tamasgal/km3pipe | km3pipe/hardware.py | Detector._init_from_file | def _init_from_file(self, filename):
"""Create detector from detx file."""
if not filename.endswith("detx"):
raise NotImplementedError('Only the detx format is supported.')
self._open_file(filename)
self._extract_comments()
self._parse_header()
self._parse_doms()
self._det_file.close() | python | def _init_from_file(self, filename):
"""Create detector from detx file."""
if not filename.endswith("detx"):
raise NotImplementedError('Only the detx format is supported.')
self._open_file(filename)
self._extract_comments()
self._parse_header()
self._parse_doms()
self._det_file.close() | [
"def",
"_init_from_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"filename",
".",
"endswith",
"(",
"\"detx\"",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Only the detx format is supported.'",
")",
"self",
".",
"_open_file",
"(",
"filename",
")",
"self",
".",
"_extract_comments",
"(",
")",
"self",
".",
"_parse_header",
"(",
")",
"self",
".",
"_parse_doms",
"(",
")",
"self",
".",
"_det_file",
".",
"close",
"(",
")"
] | Create detector from detx file. | [
"Create",
"detector",
"from",
"detx",
"file",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L92-L100 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector._readline | def _readline(self, ignore_comments=True):
"""The next line of the DETX file, optionally ignores comments"""
while True:
line = self._det_file.readline()
if line == '':
return line # To conform the EOF behaviour of .readline()
line = line.strip()
if line == '':
continue # white-space-only line
if line.startswith('#'):
if not ignore_comments:
return line
else:
return line | python | def _readline(self, ignore_comments=True):
"""The next line of the DETX file, optionally ignores comments"""
while True:
line = self._det_file.readline()
if line == '':
return line # To conform the EOF behaviour of .readline()
line = line.strip()
if line == '':
continue # white-space-only line
if line.startswith('#'):
if not ignore_comments:
return line
else:
return line | [
"def",
"_readline",
"(",
"self",
",",
"ignore_comments",
"=",
"True",
")",
":",
"while",
"True",
":",
"line",
"=",
"self",
".",
"_det_file",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"return",
"line",
"# To conform the EOF behaviour of .readline()",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"''",
":",
"continue",
"# white-space-only line",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"not",
"ignore_comments",
":",
"return",
"line",
"else",
":",
"return",
"line"
] | The next line of the DETX file, optionally ignoring comments | [
"The",
"next",
"line",
"of",
"the",
"DETX",
"file",
"optionally",
"ignores",
"comments"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L106-L119 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector._extract_comments | def _extract_comments(self):
"""Retrieve all comments from the file"""
self._det_file.seek(0, 0)
for line in self._det_file.readlines():
line = line.strip()
if line.startswith('#'):
self.add_comment(line[1:]) | python | def _extract_comments(self):
"""Retrieve all comments from the file"""
self._det_file.seek(0, 0)
for line in self._det_file.readlines():
line = line.strip()
if line.startswith('#'):
self.add_comment(line[1:]) | [
"def",
"_extract_comments",
"(",
"self",
")",
":",
"self",
".",
"_det_file",
".",
"seek",
"(",
"0",
",",
"0",
")",
"for",
"line",
"in",
"self",
".",
"_det_file",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"self",
".",
"add_comment",
"(",
"line",
"[",
"1",
":",
"]",
")"
] | Retrieve all comments from the file | [
"Retrieve",
"all",
"comments",
"from",
"the",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L121-L127 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector._parse_header | def _parse_header(self):
"""Extract information from the header of the detector file"""
self.print("Parsing the DETX header")
self._det_file.seek(0, 0)
first_line = self._readline()
try:
self.det_id, self.n_doms = split(first_line, int)
self.version = 'v1'
except ValueError:
det_id, self.version = first_line.split()
self.det_id = int(det_id)
validity = self._readline().strip()
self.valid_from, self.valid_until = split(validity, float)
raw_utm_info = self._readline().strip().split(' ')
try:
self.utm_info = UTMInfo(*raw_utm_info[1:])
except TypeError:
log.warning("Missing UTM information.")
n_doms = self._readline()
self.n_doms = int(n_doms) | python | def _parse_header(self):
"""Extract information from the header of the detector file"""
self.print("Parsing the DETX header")
self._det_file.seek(0, 0)
first_line = self._readline()
try:
self.det_id, self.n_doms = split(first_line, int)
self.version = 'v1'
except ValueError:
det_id, self.version = first_line.split()
self.det_id = int(det_id)
validity = self._readline().strip()
self.valid_from, self.valid_until = split(validity, float)
raw_utm_info = self._readline().strip().split(' ')
try:
self.utm_info = UTMInfo(*raw_utm_info[1:])
except TypeError:
log.warning("Missing UTM information.")
n_doms = self._readline()
self.n_doms = int(n_doms) | [
"def",
"_parse_header",
"(",
"self",
")",
":",
"self",
".",
"print",
"(",
"\"Parsing the DETX header\"",
")",
"self",
".",
"_det_file",
".",
"seek",
"(",
"0",
",",
"0",
")",
"first_line",
"=",
"self",
".",
"_readline",
"(",
")",
"try",
":",
"self",
".",
"det_id",
",",
"self",
".",
"n_doms",
"=",
"split",
"(",
"first_line",
",",
"int",
")",
"self",
".",
"version",
"=",
"'v1'",
"except",
"ValueError",
":",
"det_id",
",",
"self",
".",
"version",
"=",
"first_line",
".",
"split",
"(",
")",
"self",
".",
"det_id",
"=",
"int",
"(",
"det_id",
")",
"validity",
"=",
"self",
".",
"_readline",
"(",
")",
".",
"strip",
"(",
")",
"self",
".",
"valid_from",
",",
"self",
".",
"valid_until",
"=",
"split",
"(",
"validity",
",",
"float",
")",
"raw_utm_info",
"=",
"self",
".",
"_readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"try",
":",
"self",
".",
"utm_info",
"=",
"UTMInfo",
"(",
"*",
"raw_utm_info",
"[",
"1",
":",
"]",
")",
"except",
"TypeError",
":",
"log",
".",
"warning",
"(",
"\"Missing UTM information.\"",
")",
"n_doms",
"=",
"self",
".",
"_readline",
"(",
")",
"self",
".",
"n_doms",
"=",
"int",
"(",
"n_doms",
")"
] | Extract information from the header of the detector file | [
"Extract",
"information",
"from",
"the",
"header",
"of",
"the",
"detector",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L129-L148 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.dom_positions | def dom_positions(self):
"""The positions of the DOMs, calculated from PMT directions."""
if not self._dom_positions:
for dom_id in self.dom_ids:
mask = self.pmts.dom_id == dom_id
pmt_pos = self.pmts[mask].pos
pmt_dir = self.pmts[mask].dir
centre = intersect_3d(pmt_pos, pmt_pos - pmt_dir * 10)
self._dom_positions[dom_id] = centre
return self._dom_positions | python | def dom_positions(self):
"""The positions of the DOMs, calculated from PMT directions."""
if not self._dom_positions:
for dom_id in self.dom_ids:
mask = self.pmts.dom_id == dom_id
pmt_pos = self.pmts[mask].pos
pmt_dir = self.pmts[mask].dir
centre = intersect_3d(pmt_pos, pmt_pos - pmt_dir * 10)
self._dom_positions[dom_id] = centre
return self._dom_positions | [
"def",
"dom_positions",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_dom_positions",
":",
"for",
"dom_id",
"in",
"self",
".",
"dom_ids",
":",
"mask",
"=",
"self",
".",
"pmts",
".",
"dom_id",
"==",
"dom_id",
"pmt_pos",
"=",
"self",
".",
"pmts",
"[",
"mask",
"]",
".",
"pos",
"pmt_dir",
"=",
"self",
".",
"pmts",
"[",
"mask",
"]",
".",
"dir",
"centre",
"=",
"intersect_3d",
"(",
"pmt_pos",
",",
"pmt_pos",
"-",
"pmt_dir",
"*",
"10",
")",
"self",
".",
"_dom_positions",
"[",
"dom_id",
"]",
"=",
"centre",
"return",
"self",
".",
"_dom_positions"
] | The positions of the DOMs, calculated from PMT directions. | [
"The",
"positions",
"of",
"the",
"DOMs",
"calculated",
"from",
"PMT",
"directions",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L252-L261 | train |
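A short access sketch for the property above; det is assumed to be an already-initialised Detector instance:

# dom_positions lazily builds a {dom_id: centre} mapping from the PMT geometry.
for dom_id, centre in det.dom_positions.items():
    print(dom_id, centre)  # centre is the intersected 3D position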
tamasgal/km3pipe | km3pipe/hardware.py | Detector.dom_table | def dom_table(self):
"""A `Table` containing DOM attributes"""
if self._dom_table is None:
data = defaultdict(list)
for dom_id, (du, floor, _) in self.doms.items():
data['dom_id'].append(dom_id)
data['du'].append(du)
data['floor'].append(floor)
dom_position = self.dom_positions[dom_id]
data['pos_x'].append(dom_position[0])
data['pos_y'].append(dom_position[1])
data['pos_z'].append(dom_position[2])
self._dom_table = Table(data, name='DOMs', h5loc='/dom_table')
return self._dom_table | python | def dom_table(self):
"""A `Table` containing DOM attributes"""
if self._dom_table is None:
data = defaultdict(list)
for dom_id, (du, floor, _) in self.doms.items():
data['dom_id'].append(dom_id)
data['du'].append(du)
data['floor'].append(floor)
dom_position = self.dom_positions[dom_id]
data['pos_x'].append(dom_position[0])
data['pos_y'].append(dom_position[1])
data['pos_z'].append(dom_position[2])
self._dom_table = Table(data, name='DOMs', h5loc='/dom_table')
return self._dom_table | [
"def",
"dom_table",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dom_table",
"is",
"None",
":",
"data",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"dom_id",
",",
"(",
"du",
",",
"floor",
",",
"_",
")",
"in",
"self",
".",
"doms",
".",
"items",
"(",
")",
":",
"data",
"[",
"'dom_id'",
"]",
".",
"append",
"(",
"dom_id",
")",
"data",
"[",
"'du'",
"]",
".",
"append",
"(",
"du",
")",
"data",
"[",
"'floor'",
"]",
".",
"append",
"(",
"floor",
")",
"dom_position",
"=",
"self",
".",
"dom_positions",
"[",
"dom_id",
"]",
"data",
"[",
"'pos_x'",
"]",
".",
"append",
"(",
"dom_position",
"[",
"0",
"]",
")",
"data",
"[",
"'pos_y'",
"]",
".",
"append",
"(",
"dom_position",
"[",
"1",
"]",
")",
"data",
"[",
"'pos_z'",
"]",
".",
"append",
"(",
"dom_position",
"[",
"2",
"]",
")",
"self",
".",
"_dom_table",
"=",
"Table",
"(",
"data",
",",
"name",
"=",
"'DOMs'",
",",
"h5loc",
"=",
"'/dom_table'",
")",
"return",
"self",
".",
"_dom_table"
] | A `Table` containing DOM attributes | [
"A",
"Table",
"containing",
"DOM",
"attributes"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L264-L277 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.com | def com(self):
"""Center of mass, calculated from the mean of the PMT positions"""
if self._com is None:
self._com = np.mean(self.pmts.pos, axis=0)
return self._com | python | def com(self):
"""Center of mass, calculated from the mean of the PMT positions"""
if self._com is None:
self._com = np.mean(self.pmts.pos, axis=0)
return self._com | [
"def",
"com",
"(",
"self",
")",
":",
"if",
"self",
".",
"_com",
"is",
"None",
":",
"self",
".",
"_com",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"pmts",
".",
"pos",
",",
"axis",
"=",
"0",
")",
"return",
"self",
".",
"_com"
] | Center of mass, calculated from the mean of the PMT positions | [
"Center",
"of",
"mass",
"calculated",
"from",
"the",
"mean",
"of",
"the",
"PMT",
"positions"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L280-L284 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.xy_positions | def xy_positions(self):
"""XY positions of the DUs, given by the DOMs on floor 1."""
if self._xy_positions is None or len(self._xy_positions) == 0:
xy_pos = []
for dom_id, pos in self.dom_positions.items():
if self.domid2floor(dom_id) == 1:
xy_pos.append(np.array([pos[0], pos[1]]))
self._xy_positions = np.array(xy_pos)
return self._xy_positions | python | def xy_positions(self):
"""XY positions of the DUs, given by the DOMs on floor 1."""
if self._xy_positions is None or len(self._xy_positions) == 0:
xy_pos = []
for dom_id, pos in self.dom_positions.items():
if self.domid2floor(dom_id) == 1:
xy_pos.append(np.array([pos[0], pos[1]]))
self._xy_positions = np.array(xy_pos)
return self._xy_positions | [
"def",
"xy_positions",
"(",
"self",
")",
":",
"if",
"self",
".",
"_xy_positions",
"is",
"None",
"or",
"len",
"(",
"self",
".",
"_xy_positions",
")",
"==",
"0",
":",
"xy_pos",
"=",
"[",
"]",
"for",
"dom_id",
",",
"pos",
"in",
"self",
".",
"dom_positions",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"domid2floor",
"(",
"dom_id",
")",
"==",
"1",
":",
"xy_pos",
".",
"append",
"(",
"np",
".",
"array",
"(",
"[",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
"]",
")",
")",
"self",
".",
"_xy_positions",
"=",
"np",
".",
"array",
"(",
"xy_pos",
")",
"return",
"self",
".",
"_xy_positions"
] | XY positions of the DUs, given by the DOMs on floor 1. | [
"XY",
"positions",
"of",
"the",
"DUs",
"given",
"by",
"the",
"DOMs",
"on",
"floor",
"1",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L287-L295 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.translate_detector | def translate_detector(self, vector):
"""Translate the detector by a given vector"""
vector = np.array(vector, dtype=float)
self.pmts.pos_x += vector[0]
self.pmts.pos_y += vector[1]
self.pmts.pos_z += vector[2]
self.reset_caches() | python | def translate_detector(self, vector):
"""Translate the detector by a given vector"""
vector = np.array(vector, dtype=float)
self.pmts.pos_x += vector[0]
self.pmts.pos_y += vector[1]
self.pmts.pos_z += vector[2]
self.reset_caches() | [
"def",
"translate_detector",
"(",
"self",
",",
"vector",
")",
":",
"vector",
"=",
"np",
".",
"array",
"(",
"vector",
",",
"dtype",
"=",
"float",
")",
"self",
".",
"pmts",
".",
"pos_x",
"+=",
"vector",
"[",
"0",
"]",
"self",
".",
"pmts",
".",
"pos_y",
"+=",
"vector",
"[",
"1",
"]",
"self",
".",
"pmts",
".",
"pos_z",
"+=",
"vector",
"[",
"2",
"]",
"self",
".",
"reset_caches",
"(",
")"
] | Translate the detector by a given vector | [
"Translate",
"the",
"detector",
"by",
"a",
"given",
"vector"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L297-L303 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.pmt_angles | def pmt_angles(self):
"""A list of PMT directions sorted by PMT channel, on DU-1, floor-1"""
if self._pmt_angles == []:
mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
self._pmt_angles = self.pmts.dir[mask]
return self._pmt_angles | python | def pmt_angles(self):
"""A list of PMT directions sorted by PMT channel, on DU-1, floor-1"""
if self._pmt_angles == []:
mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
self._pmt_angles = self.pmts.dir[mask]
return self._pmt_angles | [
"def",
"pmt_angles",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pmt_angles",
"==",
"[",
"]",
":",
"mask",
"=",
"(",
"self",
".",
"pmts",
".",
"du",
"==",
"1",
")",
"&",
"(",
"self",
".",
"pmts",
".",
"floor",
"==",
"1",
")",
"self",
".",
"_pmt_angles",
"=",
"self",
".",
"pmts",
".",
"dir",
"[",
"mask",
"]",
"return",
"self",
".",
"_pmt_angles"
] | A list of PMT directions sorted by PMT channel, on DU-1, floor-1 | [
"A",
"list",
"of",
"PMT",
"directions",
"sorted",
"by",
"PMT",
"channel",
"on",
"DU",
"-",
"1",
"floor",
"-",
"1"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L351-L356 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.ascii | def ascii(self):
"""The ascii representation of the detector"""
comments = ''
if self.version == 'v3':
for comment in self.comments:
if not comment.startswith(' '):
comment = ' ' + comment
comments += "#" + comment + "\n"
if self.version == 'v1':
header = "{det.det_id} {det.n_doms}".format(det=self)
else:
header = "{det.det_id} {det.version}".format(det=self)
header += "\n{0} {1}".format(self.valid_from, self.valid_until)
header += "\n" + str(self.utm_info) + "\n"
header += str(self.n_doms)
doms = ""
for dom_id, (line, floor, n_pmts) in self.doms.items():
doms += "{0} {1} {2} {3}\n".format(dom_id, line, floor, n_pmts)
for channel_id in range(n_pmts):
pmt_idx = self._pmt_index_by_omkey[(line, floor, channel_id)]
pmt = self.pmts[pmt_idx]
doms += " {0} {1} {2} {3} {4} {5} {6} {7}".format(
pmt.pmt_id, pmt.pos_x, pmt.pos_y, pmt.pos_z, pmt.dir_x,
pmt.dir_y, pmt.dir_z, pmt.t0
)
if self.version == 'v3':
doms += " {0}".format(pmt.status)
doms += "\n"
return comments + header + "\n" + doms | python | def ascii(self):
"""The ascii representation of the detector"""
comments = ''
if self.version == 'v3':
for comment in self.comments:
if not comment.startswith(' '):
comment = ' ' + comment
comments += "#" + comment + "\n"
if self.version == 'v1':
header = "{det.det_id} {det.n_doms}".format(det=self)
else:
header = "{det.det_id} {det.version}".format(det=self)
header += "\n{0} {1}".format(self.valid_from, self.valid_until)
header += "\n" + str(self.utm_info) + "\n"
header += str(self.n_doms)
doms = ""
for dom_id, (line, floor, n_pmts) in self.doms.items():
doms += "{0} {1} {2} {3}\n".format(dom_id, line, floor, n_pmts)
for channel_id in range(n_pmts):
pmt_idx = self._pmt_index_by_omkey[(line, floor, channel_id)]
pmt = self.pmts[pmt_idx]
doms += " {0} {1} {2} {3} {4} {5} {6} {7}".format(
pmt.pmt_id, pmt.pos_x, pmt.pos_y, pmt.pos_z, pmt.dir_x,
pmt.dir_y, pmt.dir_z, pmt.t0
)
if self.version == 'v3':
doms += " {0}".format(pmt.status)
doms += "\n"
return comments + header + "\n" + doms | [
"def",
"ascii",
"(",
"self",
")",
":",
"comments",
"=",
"''",
"if",
"self",
".",
"version",
"==",
"'v3'",
":",
"for",
"comment",
"in",
"self",
".",
"comments",
":",
"if",
"not",
"comment",
".",
"startswith",
"(",
"' '",
")",
":",
"comment",
"=",
"' '",
"+",
"comment",
"comments",
"+=",
"\"#\"",
"+",
"comment",
"+",
"\"\\n\"",
"if",
"self",
".",
"version",
"==",
"'v1'",
":",
"header",
"=",
"\"{det.det_id} {det.n_doms}\"",
".",
"format",
"(",
"det",
"=",
"self",
")",
"else",
":",
"header",
"=",
"\"{det.det_id} {det.version}\"",
".",
"format",
"(",
"det",
"=",
"self",
")",
"header",
"+=",
"\"\\n{0} {1}\"",
".",
"format",
"(",
"self",
".",
"valid_from",
",",
"self",
".",
"valid_until",
")",
"header",
"+=",
"\"\\n\"",
"+",
"str",
"(",
"self",
".",
"utm_info",
")",
"+",
"\"\\n\"",
"header",
"+=",
"str",
"(",
"self",
".",
"n_doms",
")",
"doms",
"=",
"\"\"",
"for",
"dom_id",
",",
"(",
"line",
",",
"floor",
",",
"n_pmts",
")",
"in",
"self",
".",
"doms",
".",
"items",
"(",
")",
":",
"doms",
"+=",
"\"{0} {1} {2} {3}\\n\"",
".",
"format",
"(",
"dom_id",
",",
"line",
",",
"floor",
",",
"n_pmts",
")",
"for",
"channel_id",
"in",
"range",
"(",
"n_pmts",
")",
":",
"pmt_idx",
"=",
"self",
".",
"_pmt_index_by_omkey",
"[",
"(",
"line",
",",
"floor",
",",
"channel_id",
")",
"]",
"pmt",
"=",
"self",
".",
"pmts",
"[",
"pmt_idx",
"]",
"doms",
"+=",
"\" {0} {1} {2} {3} {4} {5} {6} {7}\"",
".",
"format",
"(",
"pmt",
".",
"pmt_id",
",",
"pmt",
".",
"pos_x",
",",
"pmt",
".",
"pos_y",
",",
"pmt",
".",
"pos_z",
",",
"pmt",
".",
"dir_x",
",",
"pmt",
".",
"dir_y",
",",
"pmt",
".",
"dir_z",
",",
"pmt",
".",
"t0",
")",
"if",
"self",
".",
"version",
"==",
"'v3'",
":",
"doms",
"+=",
"\" {0}\"",
".",
"format",
"(",
"pmt",
".",
"status",
")",
"doms",
"+=",
"\"\\n\"",
"return",
"comments",
"+",
"header",
"+",
"\"\\n\"",
"+",
"doms"
] | The ascii representation of the detector | [
"The",
"ascii",
"representation",
"of",
"the",
"detector"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L359-L389 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.write | def write(self, filename):
"""Save detx file."""
with open(filename, 'w') as f:
f.write(self.ascii)
self.print("Detector file saved as '{0}'".format(filename)) | python | def write(self, filename):
"""Save detx file."""
with open(filename, 'w') as f:
f.write(self.ascii)
self.print("Detector file saved as '{0}'".format(filename)) | [
"def",
"write",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"ascii",
")",
"self",
".",
"print",
"(",
"\"Detector file saved as '{0}'\"",
".",
"format",
"(",
"filename",
")",
")"
] | Save detx file. | [
"Save",
"detx",
"file",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L391-L395 | train |
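Putting the Detector methods above together in one hedged sketch; the positional constructor call and both detx file names are assumptions for illustration:

# Hypothetical round trip: load a detx file, shift the geometry, save a copy.
det = Detector("detector.detx")           # parsed via _init_from_file
det.translate_detector([10.0, 0.0, 0.0])  # move all PMTs 10 m along x
det.write("detector_shifted.detx")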
tamasgal/km3pipe | km3pipe/hardware.py | Detector.pmt_with_id | def pmt_with_id(self, pmt_id):
"""Get PMT with global pmt_id"""
try:
return self.pmts[self._pmt_index_by_pmt_id[pmt_id]]
except KeyError:
raise KeyError("No PMT found for ID: {0}".format(pmt_id)) | python | def pmt_with_id(self, pmt_id):
"""Get PMT with global pmt_id"""
try:
return self.pmts[self._pmt_index_by_pmt_id[pmt_id]]
except KeyError:
raise KeyError("No PMT found for ID: {0}".format(pmt_id)) | [
"def",
"pmt_with_id",
"(",
"self",
",",
"pmt_id",
")",
":",
"try",
":",
"return",
"self",
".",
"pmts",
"[",
"self",
".",
"_pmt_index_by_pmt_id",
"[",
"pmt_id",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"No PMT found for ID: {0}\"",
".",
"format",
"(",
"pmt_id",
")",
")"
] | Get PMT with global pmt_id | [
"Get",
"PMT",
"with",
"global",
"pmt_id"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L397-L402 | train |
tamasgal/km3pipe | km3pipe/hardware.py | Detector.get_pmt | def get_pmt(self, dom_id, channel_id):
"""Return PMT with DOM ID and DAQ channel ID"""
du, floor, _ = self.doms[dom_id]
pmt = self.pmts[self._pmt_index_by_omkey[(du, floor, channel_id)]]
return pmt | python | def get_pmt(self, dom_id, channel_id):
"""Return PMT with DOM ID and DAQ channel ID"""
du, floor, _ = self.doms[dom_id]
pmt = self.pmts[self._pmt_index_by_omkey[(du, floor, channel_id)]]
return pmt | [
"def",
"get_pmt",
"(",
"self",
",",
"dom_id",
",",
"channel_id",
")",
":",
"du",
",",
"floor",
",",
"_",
"=",
"self",
".",
"doms",
"[",
"dom_id",
"]",
"pmt",
"=",
"self",
".",
"pmts",
"[",
"self",
".",
"_pmt_index_by_omkey",
"[",
"(",
"du",
",",
"floor",
",",
"channel_id",
")",
"]",
"]",
"return",
"pmt"
] | Return PMT with DOM ID and DAQ channel ID | [
"Return",
"PMT",
"with",
"DOM",
"ID",
"and",
"DAQ",
"channel",
"ID"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L404-L408 | train |
tamasgal/km3pipe | km3modules/mc.py | convert_mc_times_to_jte_times | def convert_mc_times_to_jte_times(times_mc, evt_timestamp_in_ns, evt_mc_time):
"""
Function that converts MC times to JTE times.
Parameters
----------
times_mc : np.ndarray
Time array with MC times.
evt_timestamp_in_ns : int
Total timestamp of the event in nanoseconds.
evt_mc_time : int
MC time of the event in nanoseconds.
Returns
-------
ndarray
Converted time array with JTE times.
"""
# needs to be cast to normal ndarray (not recarray), or else we
# would get invalid type promotion
times_mc = np.array(times_mc).astype(float)
times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time
return times_jte | python | def convert_mc_times_to_jte_times(times_mc, evt_timestamp_in_ns, evt_mc_time):
"""
Function that converts MC times to JTE times.
Parameters
----------
times_mc : np.ndarray
Time array with MC times.
evt_timestamp_in_ns : int
Total timestamp of the event in nanoseconds.
evt_mc_time : int
MC time of the event in nanoseconds.
Returns
-------
ndarray
Converted time array with JTE times.
"""
# needs to be cast to normal ndarray (not recarray), or else we
# would get invalid type promotion
times_mc = np.array(times_mc).astype(float)
times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time
return times_jte | [
"def",
"convert_mc_times_to_jte_times",
"(",
"times_mc",
",",
"evt_timestamp_in_ns",
",",
"evt_mc_time",
")",
":",
"# needs to be cast to normal ndarray (not recarray), or else we",
"# would get invalid type promotion",
"times_mc",
"=",
"np",
".",
"array",
"(",
"times_mc",
")",
".",
"astype",
"(",
"float",
")",
"times_jte",
"=",
"times_mc",
"-",
"evt_timestamp_in_ns",
"+",
"evt_mc_time",
"return",
"times_jte"
] | Function that converts MC times to JTE times.
Parameters
----------
times_mc : np.ndarray
Time array with MC times.
evt_timestamp_in_ns : int
Total timestamp of the event in nanoseconds.
evt_mc_time : int
MC time of the event in nanoseconds.
Returns
-------
ndarray
Converted time array with JTE times. | [
"Function",
"that",
"converts",
"MC",
"times",
"to",
"JTE",
"times",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/mc.py#L86-L108 | train |
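A worked numeric check of the formula above, times_jte = times_mc - evt_timestamp_in_ns + evt_mc_time, assuming the function is importable (e.g. from km3modules.mc, per the record's path field):

import numpy as np

times_mc = np.array([1000.0, 2000.0])
print(convert_mc_times_to_jte_times(times_mc, 500, 100))
# [ 600. 1600.]  (each MC time shifted by -500 ns + 100 ns)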
tamasgal/km3pipe | km3pipe/tools.py | iexists | def iexists(irods_path):
"""Returns True of iRODS path exists, otherwise False"""
try:
subprocess.check_output(
'ils {}'.format(irods_path),
shell=True,
stderr=subprocess.PIPE,
)
return True
except subprocess.CalledProcessError:
return False | python | def iexists(irods_path):
"""Returns True of iRODS path exists, otherwise False"""
try:
subprocess.check_output(
'ils {}'.format(irods_path),
shell=True,
stderr=subprocess.PIPE,
)
return True
except subprocess.CalledProcessError:
return False | [
"def",
"iexists",
"(",
"irods_path",
")",
":",
"try",
":",
"subprocess",
".",
"check_output",
"(",
"'ils {}'",
".",
"format",
"(",
"irods_path",
")",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"return",
"True",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"return",
"False"
] | Returns True if iRODS path exists, otherwise False | [
"Returns",
"True",
"of",
"iRODS",
"path",
"exists",
"otherwise",
"False"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L42-L52 | train |
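A usage sketch; the iRODS path below is invented and the call only succeeds where the ils client is installed and configured:

if iexists("/in2p3/km3net/data/raw/sea/KM3NeT_00000029"):  # placeholder path
    print("dataset is available on iRODS")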
tamasgal/km3pipe | km3pipe/tools.py | token_urlsafe | def token_urlsafe(nbytes=32):
"""Return a random URL-safe text string, in Base64 encoding.
This is taken and slightly modified from the Python 3.6 stdlib.
The string has *nbytes* random bytes; unlike the stdlib version, *nbytes* defaults to 32 here and may not be ``None``.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA'
"""
tok = os.urandom(nbytes)
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') | python | def token_urlsafe(nbytes=32):
"""Return a random URL-safe text string, in Base64 encoding.
This is taken and slightly modified from the Python 3.6 stdlib.
The string has *nbytes* random bytes; unlike the stdlib version, *nbytes* defaults to 32 here and may not be ``None``.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA'
"""
tok = os.urandom(nbytes)
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') | [
"def",
"token_urlsafe",
"(",
"nbytes",
"=",
"32",
")",
":",
"tok",
"=",
"os",
".",
"urandom",
"(",
"nbytes",
")",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"tok",
")",
".",
"rstrip",
"(",
"b'='",
")",
".",
"decode",
"(",
"'ascii'",
")"
] | Return a random URL-safe text string, in Base64 encoding.
This is taken and slightly modified from the Python 3.6 stdlib.
The string has *nbytes* random bytes; unlike the stdlib version, *nbytes* defaults to 32 here and may not be ``None``.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA' | [
"Return",
"a",
"random",
"URL",
"-",
"safe",
"text",
"string",
"in",
"Base64",
"encoding",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L55-L68 | train |
tamasgal/km3pipe | km3pipe/tools.py | prettyln | def prettyln(text, fill='-', align='^', prefix='[ ', suffix=' ]', length=69):
"""Wrap `text` in a pretty line with maximum length."""
text = '{prefix}{0}{suffix}'.format(text, prefix=prefix, suffix=suffix)
print(
"{0:{fill}{align}{length}}".format(
text, fill=fill, align=align, length=length
)
) | python | def prettyln(text, fill='-', align='^', prefix='[ ', suffix=' ]', length=69):
"""Wrap `text` in a pretty line with maximum length."""
text = '{prefix}{0}{suffix}'.format(text, prefix=prefix, suffix=suffix)
print(
"{0:{fill}{align}{length}}".format(
text, fill=fill, align=align, length=length
)
) | [
"def",
"prettyln",
"(",
"text",
",",
"fill",
"=",
"'-'",
",",
"align",
"=",
"'^'",
",",
"prefix",
"=",
"'[ '",
",",
"suffix",
"=",
"' ]'",
",",
"length",
"=",
"69",
")",
":",
"text",
"=",
"'{prefix}{0}{suffix}'",
".",
"format",
"(",
"text",
",",
"prefix",
"=",
"prefix",
",",
"suffix",
"=",
"suffix",
")",
"print",
"(",
"\"{0:{fill}{align}{length}}\"",
".",
"format",
"(",
"text",
",",
"fill",
"=",
"fill",
",",
"align",
"=",
"align",
",",
"length",
"=",
"length",
")",
")"
] | Wrap `text` in a pretty line with maximum length. | [
"Wrap",
"text",
"in",
"a",
"pretty",
"line",
"with",
"maximum",
"length",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L77-L84 | train |
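Two illustrative calls; the rendering follows directly from the format spec above (fill character, centre alignment, total length):

prettyln("Status")                       # "[ Status ]" centred in 69 '-' chars
prettyln("Status", fill="=", length=40)  # same text framed by '=' in 40 chars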
tamasgal/km3pipe | km3pipe/tools.py | unpack_nfirst | def unpack_nfirst(seq, nfirst):
"""Unpack the nfrist items from the list and return the rest.
>>> a, b, c, rest = unpack_nfirst((1, 2, 3, 4, 5), 3)
>>> a, b, c
(1, 2, 3)
>>> rest
(4, 5)
"""
iterator = iter(seq)
for _ in range(nfirst):
yield next(iterator, None)
yield tuple(iterator) | python | def unpack_nfirst(seq, nfirst):
"""Unpack the nfrist items from the list and return the rest.
>>> a, b, c, rest = unpack_nfirst((1, 2, 3, 4, 5), 3)
>>> a, b, c
(1, 2, 3)
>>> rest
(4, 5)
"""
iterator = iter(seq)
for _ in range(nfirst):
yield next(iterator, None)
yield tuple(iterator) | [
"def",
"unpack_nfirst",
"(",
"seq",
",",
"nfirst",
")",
":",
"iterator",
"=",
"iter",
"(",
"seq",
")",
"for",
"_",
"in",
"range",
"(",
"nfirst",
")",
":",
"yield",
"next",
"(",
"iterator",
",",
"None",
")",
"yield",
"tuple",
"(",
"iterator",
")"
] | Unpack the nfirst items from the list and return the rest.
>>> a, b, c, rest = unpack_nfirst((1, 2, 3, 4, 5), 3)
>>> a, b, c
(1, 2, 3)
>>> rest
(4, 5) | [
"Unpack",
"the",
"nfrist",
"items",
"from",
"the",
"list",
"and",
"return",
"the",
"rest",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L98-L111 | train |
tamasgal/km3pipe | km3pipe/tools.py | split | def split(string, callback=None, sep=None):
"""Split the string and execute the callback function on each part.
>>> string = "1 2 3 4"
>>> parts = split(string, int)
>>> parts
[1, 2, 3, 4]
"""
if callback is not None:
return [callback(i) for i in string.split(sep)]
else:
return string.split(sep) | python | def split(string, callback=None, sep=None):
"""Split the string and execute the callback function on each part.
>>> string = "1 2 3 4"
>>> parts = split(string, int)
>>> parts
[1, 2, 3, 4]
"""
if callback is not None:
return [callback(i) for i in string.split(sep)]
else:
return string.split(sep) | [
"def",
"split",
"(",
"string",
",",
"callback",
"=",
"None",
",",
"sep",
"=",
"None",
")",
":",
"if",
"callback",
"is",
"not",
"None",
":",
"return",
"[",
"callback",
"(",
"i",
")",
"for",
"i",
"in",
"string",
".",
"split",
"(",
"sep",
")",
"]",
"else",
":",
"return",
"string",
".",
"split",
"(",
"sep",
")"
] | Split the string and execute the callback function on each part.
>>> string = "1 2 3 4"
>>> parts = split(string, int)
>>> parts
[1, 2, 3, 4] | [
"Split",
"the",
"string",
"and",
"execute",
"the",
"callback",
"function",
"on",
"each",
"part",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L114-L126 | train |
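The callback and separator arguments combine naturally; a traced example:

>>> split("1.0,2.5,3.0", float, sep=",")
[1.0, 2.5, 3.0]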
tamasgal/km3pipe | km3pipe/tools.py | namedtuple_with_defaults | def namedtuple_with_defaults(typename, field_names, default_values=[]):
"""Create a namedtuple with default values
>>> Node = namedtuple_with_defaults('Node', 'val left right')
>>> Node()
Node(val=None, left=None, right=None)
>>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
>>> Node()
Node(val=1, left=2, right=3)
>>> Node = namedtuple_with_defaults('Node', 'val left right', {'right':7})
>>> Node()
Node(val=None, left=None, right=7)
>>> Node(4)
Node(val=4, left=None, right=7)
"""
the_tuple = collections.namedtuple(typename, field_names)
the_tuple.__new__.__defaults__ = (None, ) * len(the_tuple._fields)
if isinstance(default_values, collections.Mapping):
prototype = the_tuple(**default_values)
else:
prototype = the_tuple(*default_values)
the_tuple.__new__.__defaults__ = tuple(prototype)
return the_tuple | python | def namedtuple_with_defaults(typename, field_names, default_values=[]):
"""Create a namedtuple with default values
>>> Node = namedtuple_with_defaults('Node', 'val left right')
>>> Node()
Node(val=None, left=None, right=None)
>>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
>>> Node()
Node(val=1, left=2, right=3)
>>> Node = namedtuple_with_defaults('Node', 'val left right', {'right':7})
>>> Node()
Node(val=None, left=None, right=7)
>>> Node(4)
Node(val=4, left=None, right=7)
"""
the_tuple = collections.namedtuple(typename, field_names)
the_tuple.__new__.__defaults__ = (None, ) * len(the_tuple._fields)
if isinstance(default_values, collections.Mapping):
prototype = the_tuple(**default_values)
else:
prototype = the_tuple(*default_values)
the_tuple.__new__.__defaults__ = tuple(prototype)
return the_tuple | [
"def",
"namedtuple_with_defaults",
"(",
"typename",
",",
"field_names",
",",
"default_values",
"=",
"[",
"]",
")",
":",
"the_tuple",
"=",
"collections",
".",
"namedtuple",
"(",
"typename",
",",
"field_names",
")",
"the_tuple",
".",
"__new__",
".",
"__defaults__",
"=",
"(",
"None",
",",
")",
"*",
"len",
"(",
"the_tuple",
".",
"_fields",
")",
"if",
"isinstance",
"(",
"default_values",
",",
"collections",
".",
"Mapping",
")",
":",
"prototype",
"=",
"the_tuple",
"(",
"*",
"*",
"default_values",
")",
"else",
":",
"prototype",
"=",
"the_tuple",
"(",
"*",
"default_values",
")",
"the_tuple",
".",
"__new__",
".",
"__defaults__",
"=",
"tuple",
"(",
"prototype",
")",
"return",
"the_tuple"
] | Create a namedtuple with default values
>>> Node = namedtuple_with_defaults('Node', 'val left right')
>>> Node()
Node(val=None, left=None, right=None)
>>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
>>> Node()
Node(val=1, left=2, right=3)
>>> Node = namedtuple_with_defaults('Node', 'val left right', {'right':7})
>>> Node()
Node(val=None, left=None, right=7)
>>> Node(4)
Node(val=4, left=None, right=7) | [
"Create",
"a",
"namedtuple",
"with",
"default",
"values"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L129-L151 | train |
tamasgal/km3pipe | km3pipe/tools.py | remain_file_pointer | def remain_file_pointer(function):
"""Remain the file pointer position after calling the decorated function
This decorator assumes that the last argument is the file handler.
"""
def wrapper(*args, **kwargs):
"""Wrap the function and remain its parameters and return values"""
file_obj = args[-1]
old_position = file_obj.tell()
return_value = function(*args, **kwargs)
file_obj.seek(old_position, 0)
return return_value
return wrapper | python | def remain_file_pointer(function):
"""Remain the file pointer position after calling the decorated function
This decorator assumes that the last argument is the file handler.
"""
def wrapper(*args, **kwargs):
"""Wrap the function and remain its parameters and return values"""
file_obj = args[-1]
old_position = file_obj.tell()
return_value = function(*args, **kwargs)
file_obj.seek(old_position, 0)
return return_value
return wrapper | [
"def",
"remain_file_pointer",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap the function and remain its parameters and return values\"\"\"",
"file_obj",
"=",
"args",
"[",
"-",
"1",
"]",
"old_position",
"=",
"file_obj",
".",
"tell",
"(",
")",
"return_value",
"=",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"file_obj",
".",
"seek",
"(",
"old_position",
",",
"0",
")",
"return",
"return_value",
"return",
"wrapper"
] | Retain the file pointer position after calling the decorated function
This decorator assumes that the last argument is the file handler. | [
"Remain",
"the",
"file",
"pointer",
"position",
"after",
"calling",
"the",
"decorated",
"function"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L154-L169 | train |
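A sketch of the decorator in action; peek_first_line and "data.txt" are invented for illustration:

@remain_file_pointer
def peek_first_line(fileobj):
    return fileobj.readline()

with open("data.txt") as f:  # placeholder file name
    first = peek_first_line(f)
    # f.tell() is back at 0 here, so a later read starts from the top again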
tamasgal/km3pipe | km3pipe/tools.py | decamelise | def decamelise(text):
"""Convert CamelCase to lower_and_underscore."""
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() | python | def decamelise(text):
"""Convert CamelCase to lower_and_underscore."""
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() | [
"def",
"decamelise",
"(",
"text",
")",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"text",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s",
")",
".",
"lower",
"(",
")"
] | Convert CamelCase to lower_and_underscore. | [
"Convert",
"CamelCase",
"to",
"lower_and_underscore",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L180-L183 | train |
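Traced examples of the two substitutions above:

>>> decamelise("HitSeries")
'hit_series'
>>> decamelise("HDF5Sink")
'hdf5_sink'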
tamasgal/km3pipe | km3pipe/tools.py | camelise | def camelise(text, capital_first=True):
"""Convert lower_underscore to CamelCase."""
def camelcase():
if not capital_first:
yield str.lower
while True:
yield str.capitalize
if istype(text, 'unicode'):
text = text.encode('utf8')
c = camelcase()
return "".join(next(c)(x) if x else '_' for x in text.split("_")) | python | def camelise(text, capital_first=True):
"""Convert lower_underscore to CamelCase."""
def camelcase():
if not capital_first:
yield str.lower
while True:
yield str.capitalize
if istype(text, 'unicode'):
text = text.encode('utf8')
c = camelcase()
return "".join(next(c)(x) if x else '_' for x in text.split("_")) | [
"def",
"camelise",
"(",
"text",
",",
"capital_first",
"=",
"True",
")",
":",
"def",
"camelcase",
"(",
")",
":",
"if",
"not",
"capital_first",
":",
"yield",
"str",
".",
"lower",
"while",
"True",
":",
"yield",
"str",
".",
"capitalize",
"if",
"istype",
"(",
"text",
",",
"'unicode'",
")",
":",
"text",
"=",
"text",
".",
"encode",
"(",
"'utf8'",
")",
"c",
"=",
"camelcase",
"(",
")",
"return",
"\"\"",
".",
"join",
"(",
"next",
"(",
"c",
")",
"(",
"x",
")",
"if",
"x",
"else",
"'_'",
"for",
"x",
"in",
"text",
".",
"split",
"(",
"\"_\"",
")",
")"
] | Convert lower_underscore to CamelCase. | [
"Convert",
"lower_underscore",
"to",
"CamelCase",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L186-L198 | train |
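Traced examples; the second call exercises the capital_first switch:

>>> camelise("hit_series")
'HitSeries'
>>> camelise("hit_series", capital_first=False)
'hitSeries'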
tamasgal/km3pipe | km3pipe/tools.py | colored | def colored(text, color=None, on_color=None, attrs=None, ansi_code=None):
"""Colorize text, while stripping nested ANSI color sequences.
Author: Konstantin Lepa <[email protected]> / termcolor
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
if ansi_code is not None:
return "\033[38;5;{}m{}\033[0m".format(ansi_code, text)
fmt_str = '\033[%dm%s'
if color is not None:
text = re.sub(COLORS_RE + '(.*?)' + RESET_RE, r'\1', text)
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = re.sub(HIGHLIGHTS_RE + '(.*?)' + RESET_RE, r'\1', text)
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
text = re.sub(ATTRIBUTES_RE + '(.*?)' + RESET_RE, r'\1', text)
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
return text + RESET
else:
return text | python | def colored(text, color=None, on_color=None, attrs=None, ansi_code=None):
"""Colorize text, while stripping nested ANSI color sequences.
Author: Konstantin Lepa <[email protected]> / termcolor
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
if ansi_code is not None:
return "\033[38;5;{}m{}\033[0m".format(ansi_code, text)
fmt_str = '\033[%dm%s'
if color is not None:
text = re.sub(COLORS_RE + '(.*?)' + RESET_RE, r'\1', text)
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = re.sub(HIGHLIGHTS_RE + '(.*?)' + RESET_RE, r'\1', text)
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
text = re.sub(ATTRIBUTES_RE + '(.*?)' + RESET_RE, r'\1', text)
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
return text + RESET
else:
return text | [
"def",
"colored",
"(",
"text",
",",
"color",
"=",
"None",
",",
"on_color",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"ansi_code",
"=",
"None",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"'ANSI_COLORS_DISABLED'",
")",
"is",
"None",
":",
"if",
"ansi_code",
"is",
"not",
"None",
":",
"return",
"\"\\033[38;5;{}m{}\\033[0m\"",
".",
"format",
"(",
"ansi_code",
",",
"text",
")",
"fmt_str",
"=",
"'\\033[%dm%s'",
"if",
"color",
"is",
"not",
"None",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"COLORS_RE",
"+",
"'(.*?)'",
"+",
"RESET_RE",
",",
"r'\\1'",
",",
"text",
")",
"text",
"=",
"fmt_str",
"%",
"(",
"COLORS",
"[",
"color",
"]",
",",
"text",
")",
"if",
"on_color",
"is",
"not",
"None",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"HIGHLIGHTS_RE",
"+",
"'(.*?)'",
"+",
"RESET_RE",
",",
"r'\\1'",
",",
"text",
")",
"text",
"=",
"fmt_str",
"%",
"(",
"HIGHLIGHTS",
"[",
"on_color",
"]",
",",
"text",
")",
"if",
"attrs",
"is",
"not",
"None",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"ATTRIBUTES_RE",
"+",
"'(.*?)'",
"+",
"RESET_RE",
",",
"r'\\1'",
",",
"text",
")",
"for",
"attr",
"in",
"attrs",
":",
"text",
"=",
"fmt_str",
"%",
"(",
"ATTRIBUTES",
"[",
"attr",
"]",
",",
"text",
")",
"return",
"text",
"+",
"RESET",
"else",
":",
"return",
"text"
] | Colorize text, while stripping nested ANSI color sequences.
Author: Konstantin Lepa <[email protected]> / termcolor
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green') | [
"Colorize",
"text",
"while",
"stripping",
"nested",
"ANSI",
"color",
"sequences",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L247-L278 | train |
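A hedged usage sketch for colored(); it assumes km3pipe.tools is importable and that the terminal honours ANSI escapes:

from km3pipe.tools import colored

print(colored('error', 'red', attrs=['bold']))   # named colour plus attribute
print(colored('note', on_color='on_yellow'))     # background highlight
print(colored('orange-ish', ansi_code=208))      # 256-colour palette shortcut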
tamasgal/km3pipe | km3pipe/tools.py | zero_pad | def zero_pad(m, n=1):
"""Pad a matrix with zeros, on all sides."""
return np.pad(m, (n, n), mode='constant', constant_values=[0]) | python | def zero_pad(m, n=1):
"""Pad a matrix with zeros, on all sides."""
return np.pad(m, (n, n), mode='constant', constant_values=[0]) | [
"def",
"zero_pad",
"(",
"m",
",",
"n",
"=",
"1",
")",
":",
"return",
"np",
".",
"pad",
"(",
"m",
",",
"(",
"n",
",",
"n",
")",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"[",
"0",
"]",
")"
] | Pad a matrix with zeros, on all sides. | [
"Pad",
"a",
"matrix",
"with",
"zeros",
"on",
"all",
"sides",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L332-L334 | train |
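For illustration, zero_pad() on a 2x2 matrix (assumes numpy and the function above):

import numpy as np
from km3pipe.tools import zero_pad

m = np.array([[1, 2], [3, 4]])
print(zero_pad(m))
# [[0 0 0 0]
#  [0 1 2 0]
#  [0 3 4 0]
#  [0 0 0 0]]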
tamasgal/km3pipe | km3pipe/tools.py | supports_color | def supports_color():
"""Checks if the terminal supports color."""
if isnotebook():
return True
supported_platform = sys.platform != 'win32' or 'ANSICON' in os.environ
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True | python | def supports_color():
"""Checks if the terminal supports color."""
if isnotebook():
return True
supported_platform = sys.platform != 'win32' or 'ANSICON' in os.environ
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True | [
"def",
"supports_color",
"(",
")",
":",
"if",
"isnotebook",
"(",
")",
":",
"return",
"True",
"supported_platform",
"=",
"sys",
".",
"platform",
"!=",
"'win32'",
"or",
"'ANSICON'",
"in",
"os",
".",
"environ",
"is_a_tty",
"=",
"hasattr",
"(",
"sys",
".",
"stdout",
",",
"'isatty'",
")",
"and",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"if",
"not",
"supported_platform",
"or",
"not",
"is_a_tty",
":",
"return",
"False",
"return",
"True"
] | Checks if the terminal supports color. | [
"Checks",
"if",
"the",
"terminal",
"supports",
"color",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L356-L366 | train |
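A simple gating pattern built on supports_color(); the escape codes below are plain ANSI and not part of the original API:

from km3pipe.tools import supports_color

msg = "all good"
print("\033[32m" + msg + "\033[0m" if supports_color() else msg)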
tamasgal/km3pipe | km3pipe/tools.py | get_jpp_revision | def get_jpp_revision(via_command='JPrint'):
"""Retrieves the Jpp revision number"""
try:
output = subprocess.check_output([via_command, '-v'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode == 1:
output = e.output
else:
return None
except OSError:
return None
revision = output.decode().split('\n')[0].split()[1].strip()
return revision | python | def get_jpp_revision(via_command='JPrint'):
"""Retrieves the Jpp revision number"""
try:
output = subprocess.check_output([via_command, '-v'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode == 1:
output = e.output
else:
return None
except OSError:
return None
revision = output.decode().split('\n')[0].split()[1].strip()
return revision | [
"def",
"get_jpp_revision",
"(",
"via_command",
"=",
"'JPrint'",
")",
":",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"via_command",
",",
"'-v'",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"if",
"e",
".",
"returncode",
"==",
"1",
":",
"output",
"=",
"e",
".",
"output",
"else",
":",
"return",
"None",
"except",
"OSError",
":",
"return",
"None",
"revision",
"=",
"output",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"revision"
] | Retrieves the Jpp revision number | [
"Retrieves",
"the",
"Jpp",
"revision",
"number"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L369-L382 | train |
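Usage sketch: get_jpp_revision() returns None when the Jpp binaries are missing, so callers should guard on that:

from km3pipe.tools import get_jpp_revision

rev = get_jpp_revision()
if rev is None:
    print("Jpp tools not found on PATH")
else:
    print("Jpp revision:", rev)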
tamasgal/km3pipe | km3pipe/tools.py | timed_cache | def timed_cache(**timed_cache_kwargs):
"""LRU cache decorator with timeout.
Parameters
----------
days: int
seconds: int
microseconds: int
milliseconds: int
minutes: int
hours: int
weeks: int
maxsize: int [default: 128]
typed: bool [default: False]
"""
def _wrapper(f):
maxsize = timed_cache_kwargs.pop('maxsize', 128)
typed = timed_cache_kwargs.pop('typed', False)
update_delta = timedelta(**timed_cache_kwargs)
# nonlocal workaround to support Python 2
# https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/
d = {'next_update': datetime.utcnow() - update_delta}
try:
f = functools.lru_cache(maxsize=maxsize, typed=typed)(f)
except AttributeError:
print(
"LRU caching is not available in Pyton 2.7, "
"this will have no effect!"
)
pass
@functools.wraps(f)
def _wrapped(*args, **kwargs):
now = datetime.utcnow()
if now >= d['next_update']:
try:
f.cache_clear()
except AttributeError:
pass
d['next_update'] = now + update_delta
return f(*args, **kwargs)
return _wrapped
return _wrapper | python | def timed_cache(**timed_cache_kwargs):
"""LRU cache decorator with timeout.
Parameters
----------
days: int
seconds: int
microseconds: int
milliseconds: int
minutes: int
hours: int
weeks: int
maxsize: int [default: 128]
typed: bool [default: False]
"""
def _wrapper(f):
maxsize = timed_cache_kwargs.pop('maxsize', 128)
typed = timed_cache_kwargs.pop('typed', False)
update_delta = timedelta(**timed_cache_kwargs)
# nonlocal workaround to support Python 2
# https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/
d = {'next_update': datetime.utcnow() - update_delta}
try:
f = functools.lru_cache(maxsize=maxsize, typed=typed)(f)
except AttributeError:
print(
"LRU caching is not available in Pyton 2.7, "
"this will have no effect!"
)
pass
@functools.wraps(f)
def _wrapped(*args, **kwargs):
now = datetime.utcnow()
if now >= d['next_update']:
try:
f.cache_clear()
except AttributeError:
pass
d['next_update'] = now + update_delta
return f(*args, **kwargs)
return _wrapped
return _wrapper | [
"def",
"timed_cache",
"(",
"*",
"*",
"timed_cache_kwargs",
")",
":",
"def",
"_wrapper",
"(",
"f",
")",
":",
"maxsize",
"=",
"timed_cache_kwargs",
".",
"pop",
"(",
"'maxsize'",
",",
"128",
")",
"typed",
"=",
"timed_cache_kwargs",
".",
"pop",
"(",
"'typed'",
",",
"False",
")",
"update_delta",
"=",
"timedelta",
"(",
"*",
"*",
"timed_cache_kwargs",
")",
"# nonlocal workaround to support Python 2",
"# https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/",
"d",
"=",
"{",
"'next_update'",
":",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"update_delta",
"}",
"try",
":",
"f",
"=",
"functools",
".",
"lru_cache",
"(",
"maxsize",
"=",
"maxsize",
",",
"typed",
"=",
"typed",
")",
"(",
"f",
")",
"except",
"AttributeError",
":",
"print",
"(",
"\"LRU caching is not available in Pyton 2.7, \"",
"\"this will have no effect!\"",
")",
"pass",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"now",
">=",
"d",
"[",
"'next_update'",
"]",
":",
"try",
":",
"f",
".",
"cache_clear",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"d",
"[",
"'next_update'",
"]",
"=",
"now",
"+",
"update_delta",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapped",
"return",
"_wrapper"
] | LRU cache decorator with timeout.
Parameters
----------
days: int
seconds: int
microseconds: int
milliseconds: int
minutes: int
hours: int
weeks: int
maxsize: int [default: 128]
typed: bool [default: False] | [
"LRU",
"cache",
"decorator",
"with",
"timeout",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L385-L430 | train |
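A hedged usage sketch for the decorator above; fetch_calibration and its body are hypothetical stand-ins for an expensive call:

from km3pipe.tools import timed_cache

@timed_cache(minutes=5, maxsize=32)
def fetch_calibration(det_id):  # hypothetical expensive lookup
    print("cache miss for", det_id)
    return {"det_id": det_id}

fetch_calibration(42)  # computed; prints "cache miss for 42"
fetch_calibration(42)  # served from the LRU cache until the 5-minute window expires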
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | ProfileCache._get_point | def _get_point(self, profile, point):
"""
Finds the given point in the profile, or adds it in sorted z order.
"""
cur_points_z = [p.location.z for p in profile.elements]
try:
cur_idx = cur_points_z.index(point.z)
return profile.elements[cur_idx]
except ValueError:
new_idx = bisect_left(cur_points_z, point.z)
new_point = Point()
new_point.location = sPoint(point)
new_point.time = profile.time
profile.elements.insert(new_idx, new_point)
return new_point | python | def _get_point(self, profile, point):
"""
Finds the given point in the profile, or adds it in sorted z order.
"""
cur_points_z = [p.location.z for p in profile.elements]
try:
cur_idx = cur_points_z.index(point.z)
return profile.elements[cur_idx]
except ValueError:
new_idx = bisect_left(cur_points_z, point.z)
new_point = Point()
new_point.location = sPoint(point)
new_point.time = profile.time
profile.elements.insert(new_idx, new_point)
return new_point | [
"def",
"_get_point",
"(",
"self",
",",
"profile",
",",
"point",
")",
":",
"cur_points_z",
"=",
"[",
"p",
".",
"location",
".",
"z",
"for",
"p",
"in",
"profile",
".",
"elements",
"]",
"try",
":",
"cur_idx",
"=",
"cur_points_z",
".",
"index",
"(",
"point",
".",
"z",
")",
"return",
"profile",
".",
"elements",
"[",
"cur_idx",
"]",
"except",
"ValueError",
":",
"new_idx",
"=",
"bisect_left",
"(",
"cur_points_z",
",",
"point",
".",
"z",
")",
"new_point",
"=",
"Point",
"(",
")",
"new_point",
".",
"location",
"=",
"sPoint",
"(",
"point",
")",
"new_point",
".",
"time",
"=",
"profile",
".",
"time",
"profile",
".",
"elements",
".",
"insert",
"(",
"new_idx",
",",
"new_point",
")",
"return",
"new_point"
] | Finds the given point in the profile, or adds it in sorted z order. | [
"Finds",
"the",
"given",
"point",
"in",
"the",
"profile",
"or",
"adds",
"it",
"in",
"sorted",
"z",
"order",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L74-L88 | train |
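The heart of _get_point() is a sorted insert via bisect_left; a standalone sketch with plain floats in place of pyoos Point objects:

from bisect import bisect_left

zs = [1.0, 5.0, 10.0]      # existing profile depths, kept sorted
z = 7.5
idx = bisect_left(zs, z)   # insertion index that preserves the ordering
zs.insert(idx, z)
print(zs)                  # [1.0, 5.0, 7.5, 10.0]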
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._parse_data_array | def _parse_data_array(self, data_array):
"""
Parses a general DataArray.
"""
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)] | python | def _parse_data_array(self, data_array):
"""
Parses a general DataArray.
"""
# decimalSeparator = data_array.encoding.decimalSeparator
tokenSeparator = data_array.encoding.tokenSeparator
blockSeparator = data_array.encoding.blockSeparator
# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces
data_values = data_array.values
lines = [x for x in data_values.split(blockSeparator) if x != ""]
ret_val = []
for row in lines:
values = row.split(tokenSeparator)
ret_val.append(
[
float(v)
if " " not in v.strip()
else [float(vv) for vv in v.split()]
for v in values
]
)
# transpose into columns
return [list(x) for x in zip(*ret_val)] | [
"def",
"_parse_data_array",
"(",
"self",
",",
"data_array",
")",
":",
"# decimalSeparator = data_array.encoding.decimalSeparator",
"tokenSeparator",
"=",
"data_array",
".",
"encoding",
".",
"tokenSeparator",
"blockSeparator",
"=",
"data_array",
".",
"encoding",
".",
"blockSeparator",
"# collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces",
"data_values",
"=",
"data_array",
".",
"values",
"lines",
"=",
"[",
"x",
"for",
"x",
"in",
"data_values",
".",
"split",
"(",
"blockSeparator",
")",
"if",
"x",
"!=",
"\"\"",
"]",
"ret_val",
"=",
"[",
"]",
"for",
"row",
"in",
"lines",
":",
"values",
"=",
"row",
".",
"split",
"(",
"tokenSeparator",
")",
"ret_val",
".",
"append",
"(",
"[",
"float",
"(",
"v",
")",
"if",
"\" \"",
"not",
"in",
"v",
".",
"strip",
"(",
")",
"else",
"[",
"float",
"(",
"vv",
")",
"for",
"vv",
"in",
"v",
".",
"split",
"(",
")",
"]",
"for",
"v",
"in",
"values",
"]",
")",
"# transpose into columns",
"return",
"[",
"list",
"(",
"x",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"ret_val",
")",
"]"
] | Parses a general DataArray. | [
"Parses",
"a",
"general",
"DataArray",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L280-L306 | train |
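A self-contained sketch of the same decode-then-transpose logic, with hypothetical ',' token and ';' block separators (the nested space-separated case is omitted for brevity):

values = "1.0,2.0;3.0,4.0;"
rows = [[float(v) for v in line.split(",")]
        for line in values.split(";") if line != ""]
columns = [list(col) for col in zip(*rows)]
print(columns)  # [[1.0, 3.0], [2.0, 4.0]]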
ioos/pyoos | pyoos/parsers/ioos/one/timeseries_profile.py | TimeSeriesProfile._parse_sensor_record | def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
"""
Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values.
"""
val_idx = 0
# @TODO seems there is only a single field in each of these
assert len(sensor_data_rec.field) == 1
sensor_data_array = sensor_data_rec.field[0].content
# there is probably not going to be a count in the def, it'll be in the data
count = None
count_text = sensor_data_array.elementCount.text
if count_text:
count = int(count_text.strip())
if not count:
count = int(rem_values[val_idx])
val_idx += 1
parsed = []
for recnum in range(count):
cur = []
for f in sensor_data_array.elementType.field:
cur_val = rem_values[val_idx]
val_idx += 1
m = Member(name=f.name, standard=f.content.definition)
if hasattr(f.content, "uom"):
m["units"] = f.content.uom
try:
m["value"] = float(cur_val)
except ValueError:
m["value"] = cur_val
if len(f.quality):
m["quality"] = []
for qual in f.quality:
cur_qual = rem_values[val_idx]
val_idx += 1
# @TODO check this against constraints
m["quality"].append(cur_qual)
cur.append(m)
parsed.append(cur)
return parsed, val_idx | python | def _parse_sensor_record(self, sensor_data_rec, sensor_info, rem_values):
"""
Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values.
"""
val_idx = 0
# @TODO seems there is only a single field in each of these
assert len(sensor_data_rec.field) == 1
sensor_data_array = sensor_data_rec.field[0].content
# there is probably not going to be a count in the def, it'll be in the data
count = None
count_text = sensor_data_array.elementCount.text
if count_text:
count = int(count_text.strip())
if not count:
count = int(rem_values[val_idx])
val_idx += 1
parsed = []
for recnum in range(count):
cur = []
for f in sensor_data_array.elementType.field:
cur_val = rem_values[val_idx]
val_idx += 1
m = Member(name=f.name, standard=f.content.definition)
if hasattr(f.content, "uom"):
m["units"] = f.content.uom
try:
m["value"] = float(cur_val)
except ValueError:
m["value"] = cur_val
if len(f.quality):
m["quality"] = []
for qual in f.quality:
cur_qual = rem_values[val_idx]
val_idx += 1
# @TODO check this against constraints
m["quality"].append(cur_qual)
cur.append(m)
parsed.append(cur)
return parsed, val_idx | [
"def",
"_parse_sensor_record",
"(",
"self",
",",
"sensor_data_rec",
",",
"sensor_info",
",",
"rem_values",
")",
":",
"val_idx",
"=",
"0",
"# @TODO seems there is only a single field in each of these",
"assert",
"len",
"(",
"sensor_data_rec",
".",
"field",
")",
"==",
"1",
"sensor_data_array",
"=",
"sensor_data_rec",
".",
"field",
"[",
"0",
"]",
".",
"content",
"# there is probably not going to be a count in the def, it'll be in the data",
"count",
"=",
"None",
"count_text",
"=",
"sensor_data_array",
".",
"elementCount",
".",
"text",
"if",
"count_text",
":",
"count",
"=",
"int",
"(",
"count_text",
".",
"strip",
"(",
")",
")",
"if",
"not",
"count",
":",
"count",
"=",
"int",
"(",
"rem_values",
"[",
"val_idx",
"]",
")",
"val_idx",
"+=",
"1",
"parsed",
"=",
"[",
"]",
"for",
"recnum",
"in",
"range",
"(",
"count",
")",
":",
"cur",
"=",
"[",
"]",
"for",
"f",
"in",
"sensor_data_array",
".",
"elementType",
".",
"field",
":",
"cur_val",
"=",
"rem_values",
"[",
"val_idx",
"]",
"val_idx",
"+=",
"1",
"m",
"=",
"Member",
"(",
"name",
"=",
"f",
".",
"name",
",",
"standard",
"=",
"f",
".",
"content",
".",
"definition",
")",
"if",
"hasattr",
"(",
"f",
".",
"content",
",",
"\"uom\"",
")",
":",
"m",
"[",
"\"units\"",
"]",
"=",
"f",
".",
"content",
".",
"uom",
"try",
":",
"m",
"[",
"\"value\"",
"]",
"=",
"float",
"(",
"cur_val",
")",
"except",
"ValueError",
":",
"m",
"[",
"\"value\"",
"]",
"=",
"cur_val",
"if",
"len",
"(",
"f",
".",
"quality",
")",
":",
"m",
"[",
"\"quality\"",
"]",
"=",
"[",
"]",
"for",
"qual",
"in",
"f",
".",
"quality",
":",
"cur_qual",
"=",
"rem_values",
"[",
"val_idx",
"]",
"val_idx",
"+=",
"1",
"# @TODO check this against constraints",
"m",
"[",
"\"quality\"",
"]",
".",
"append",
"(",
"cur_qual",
")",
"cur",
".",
"append",
"(",
"m",
")",
"parsed",
".",
"append",
"(",
"cur",
")",
"return",
"parsed",
",",
"val_idx"
] | Parses values via sensor data record passed in.
Returns parsed values AND how many items it consumed out of rem_values. | [
"Parses",
"values",
"via",
"sensor",
"data",
"record",
"passed",
"in",
".",
"Returns",
"parsed",
"values",
"AND",
"how",
"many",
"items",
"it",
"consumed",
"out",
"of",
"rem_values",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/timeseries_profile.py#L393-L446 | train |
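The parser walks a flat, count-prefixed token stream and reports how many tokens it consumed; a toy version of that contract, with a hypothetical (depth, temperature) field layout:

rem_values = ["2", "10.5", "3.2", "11.0", "3.4"]  # count, then two (depth, temp) pairs

count, idx = int(rem_values[0]), 1
parsed = []
for _ in range(count):
    parsed.append((float(rem_values[idx]), float(rem_values[idx + 1])))
    idx += 2
print(parsed, idx)  # [(10.5, 3.2), (11.0, 3.4)] 5 -- idx goes back to the caller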
IRC-SPHERE/HyperStream | hyperstream/online_engine.py | OnlineEngine.execute | def execute(self, debug=False):
"""
Execute the engine - currently simply executes all workflows.
"""
if debug:
# Set some default times for execution (debugging)
start_time = datetime(year=2016, month=10, day=19, hour=12, minute=28, tzinfo=UTC)
duration = timedelta(seconds=5)
end_time = start_time + duration
relative_interval = RelativeTimeInterval(0, 0)
time_interval = TimeInterval(start_time, end_time)
# workflow_id = "lda_localisation_model_predict"
else:
duration = 0 # not needed
relative_interval = self.hyperstream.config.online_engine.interval
time_interval = relative_interval.absolute(utcnow())
for _ in range(self.hyperstream.config.online_engine.iterations):
if not debug:
# if this takes more than x minutes, kill myself
signal.alarm(self.hyperstream.config.online_engine.alarm)
logging.info("Online engine starting up.")
# self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))
self.hyperstream.workflow_manager.set_all_requested_intervals(TimeIntervals([time_interval]))
self.hyperstream.workflow_manager.execute_all()
logging.info("Online engine shutting down.")
logging.info("")
sleep(self.hyperstream.config.online_engine.sleep)
if debug:
time_interval += duration
else:
time_interval = TimeInterval(time_interval.end, utcnow() + timedelta(seconds=relative_interval.end)) | python | def execute(self, debug=False):
"""
Execute the engine - currently simply executes all workflows.
"""
if debug:
# Set some default times for execution (debugging)
start_time = datetime(year=2016, month=10, day=19, hour=12, minute=28, tzinfo=UTC)
duration = timedelta(seconds=5)
end_time = start_time + duration
relative_interval = RelativeTimeInterval(0, 0)
time_interval = TimeInterval(start_time, end_time)
# workflow_id = "lda_localisation_model_predict"
else:
duration = 0 # not needed
relative_interval = self.hyperstream.config.online_engine.interval
time_interval = relative_interval.absolute(utcnow())
for _ in range(self.hyperstream.config.online_engine.iterations):
if not debug:
# if this takes more than x minutes, kill myself
signal.alarm(self.hyperstream.config.online_engine.alarm)
logging.info("Online engine starting up.")
# self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))
self.hyperstream.workflow_manager.set_all_requested_intervals(TimeIntervals([time_interval]))
self.hyperstream.workflow_manager.execute_all()
logging.info("Online engine shutting down.")
logging.info("")
sleep(self.hyperstream.config.online_engine.sleep)
if debug:
time_interval += duration
else:
time_interval = TimeInterval(time_interval.end, utcnow() + timedelta(seconds=relative_interval.end)) | [
"def",
"execute",
"(",
"self",
",",
"debug",
"=",
"False",
")",
":",
"if",
"debug",
":",
"# Set some default times for execution (debugging)",
"start_time",
"=",
"datetime",
"(",
"year",
"=",
"2016",
",",
"month",
"=",
"10",
",",
"day",
"=",
"19",
",",
"hour",
"=",
"12",
",",
"minute",
"=",
"28",
",",
"tzinfo",
"=",
"UTC",
")",
"duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"5",
")",
"end_time",
"=",
"start_time",
"+",
"duration",
"relative_interval",
"=",
"RelativeTimeInterval",
"(",
"0",
",",
"0",
")",
"time_interval",
"=",
"TimeInterval",
"(",
"start_time",
",",
"end_time",
")",
"# workflow_id = \"lda_localisation_model_predict\"",
"else",
":",
"duration",
"=",
"0",
"# not needed",
"relative_interval",
"=",
"self",
".",
"hyperstream",
".",
"config",
".",
"online_engine",
".",
"interval",
"time_interval",
"=",
"relative_interval",
".",
"absolute",
"(",
"utcnow",
"(",
")",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"hyperstream",
".",
"config",
".",
"online_engine",
".",
"iterations",
")",
":",
"if",
"not",
"debug",
":",
"# if this takes more than x minutes, kill myself",
"signal",
".",
"alarm",
"(",
"self",
".",
"hyperstream",
".",
"config",
".",
"online_engine",
".",
"alarm",
")",
"logging",
".",
"info",
"(",
"\"Online engine starting up.\"",
")",
"# self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))",
"self",
".",
"hyperstream",
".",
"workflow_manager",
".",
"set_all_requested_intervals",
"(",
"TimeIntervals",
"(",
"[",
"time_interval",
"]",
")",
")",
"self",
".",
"hyperstream",
".",
"workflow_manager",
".",
"execute_all",
"(",
")",
"logging",
".",
"info",
"(",
"\"Online engine shutting down.\"",
")",
"logging",
".",
"info",
"(",
"\"\"",
")",
"sleep",
"(",
"self",
".",
"hyperstream",
".",
"config",
".",
"online_engine",
".",
"sleep",
")",
"if",
"debug",
":",
"time_interval",
"+=",
"duration",
"else",
":",
"time_interval",
"=",
"TimeInterval",
"(",
"time_interval",
".",
"end",
",",
"utcnow",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"relative_interval",
".",
"end",
")",
")"
] | Execute the engine - currently simply executes all workflows. | [
"Execute",
"the",
"engine",
"-",
"currently",
"simple",
"executes",
"all",
"workflows",
"."
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/online_engine.py#L47-L85 | train |
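In the non-debug branch the execution window slides forward on every iteration; the bookkeeping reduces to this sketch (not HyperStream API):

from datetime import datetime, timedelta

def slide(previous_end, relative_end_seconds):
    # next window: (previous end, now + relative offset]
    return previous_end, datetime.utcnow() + timedelta(seconds=relative_end_seconds)

start, end = slide(datetime.utcnow(), 0)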
NaPs/Kolekto | kolekto/commands/show.py | show | def show(movie):
""" Show the movie metadata.
"""
for key, value in sorted(movie.iteritems(), cmp=metadata_sorter, key=lambda x: x[0]):
if isinstance(value, list):
if not value:
continue
other = value[1:]
value = value[0]
else:
other = []
printer.p('<b>{key}</b>: {value}', key=key, value=value)
for value in other:
printer.p('{pad}{value}', value=value, pad=' ' * (len(key) + 2)) | python | def show(movie):
""" Show the movie metadata.
"""
for key, value in sorted(movie.iteritems(), cmp=metadata_sorter, key=lambda x: x[0]):
if isinstance(value, list):
if not value:
continue
other = value[1:]
value = value[0]
else:
other = []
printer.p('<b>{key}</b>: {value}', key=key, value=value)
for value in other:
printer.p('{pad}{value}', value=value, pad=' ' * (len(key) + 2)) | [
"def",
"show",
"(",
"movie",
")",
":",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"movie",
".",
"iteritems",
"(",
")",
",",
"cmp",
"=",
"metadata_sorter",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"not",
"value",
":",
"continue",
"other",
"=",
"value",
"[",
"1",
":",
"]",
"value",
"=",
"value",
"[",
"0",
"]",
"else",
":",
"other",
"=",
"[",
"]",
"printer",
".",
"p",
"(",
"'<b>{key}</b>: {value}'",
",",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
"for",
"value",
"in",
"other",
":",
"printer",
".",
"p",
"(",
"'{pad}{value}'",
",",
"value",
"=",
"value",
",",
"pad",
"=",
"' '",
"*",
"(",
"len",
"(",
"key",
")",
"+",
"2",
")",
")"
] | Show the movie metadata. | [
"Show",
"the",
"movie",
"metadata",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/show.py#L11-L24 | train |
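Note that movie.iteritems() and the cmp= keyword exist only on Python 2; a hedged Python 3 port of the same sort uses functools.cmp_to_key (movie and metadata_sorter as above, and metadata_sorter itself also needs the cmp() shim shown after the next record):

from functools import cmp_to_key

key_fn = cmp_to_key(metadata_sorter)  # wraps the old-style comparator
for key, value in sorted(movie.items(), key=lambda kv: key_fn(kv[0])):
    pass  # same display loop as above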
NaPs/Kolekto | kolekto/commands/show.py | metadata_sorter | def metadata_sorter(x, y):
""" Sort metadata keys by priority.
"""
if x == y:
return 0
if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
elif x in METADATA_SORTER_FIRST:
return -1
elif y in METADATA_SORTER_FIRST:
return 1
else:
if x.startswith('_') and y.startswith('_'):
return cmp(x[1:], y[1:])
elif x.startswith('_'):
return 1
elif y.startswith('_'):
return -1
else:
return cmp(x, y) | python | def metadata_sorter(x, y):
""" Sort metadata keys by priority.
"""
if x == y:
return 0
if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
elif x in METADATA_SORTER_FIRST:
return -1
elif y in METADATA_SORTER_FIRST:
return 1
else:
if x.startswith('_') and y.startswith('_'):
return cmp(x[1:], y[1:])
elif x.startswith('_'):
return 1
elif y.startswith('_'):
return -1
else:
return cmp(x, y) | [
"def",
"metadata_sorter",
"(",
"x",
",",
"y",
")",
":",
"if",
"x",
"==",
"y",
":",
"return",
"0",
"if",
"x",
"in",
"METADATA_SORTER_FIRST",
"and",
"y",
"in",
"METADATA_SORTER_FIRST",
":",
"return",
"-",
"1",
"if",
"METADATA_SORTER_FIRST",
".",
"index",
"(",
"x",
")",
"<",
"METADATA_SORTER_FIRST",
".",
"index",
"(",
"y",
")",
"else",
"1",
"elif",
"x",
"in",
"METADATA_SORTER_FIRST",
":",
"return",
"-",
"1",
"elif",
"y",
"in",
"METADATA_SORTER_FIRST",
":",
"return",
"1",
"else",
":",
"if",
"x",
".",
"startswith",
"(",
"'_'",
")",
"and",
"y",
".",
"startswith",
"(",
"'_'",
")",
":",
"return",
"cmp",
"(",
"x",
"[",
"1",
":",
"]",
",",
"y",
"[",
"1",
":",
"]",
")",
"elif",
"x",
".",
"startswith",
"(",
"'_'",
")",
":",
"return",
"1",
"elif",
"y",
".",
"startswith",
"(",
"'_'",
")",
":",
"return",
"-",
"1",
"else",
":",
"return",
"cmp",
"(",
"x",
",",
"y",
")"
] | Sort metadata keys by priority. | [
"Sort",
"metadata",
"keys",
"by",
"priority",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/show.py#L27-L46 | train |
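metadata_sorter relies on the Python 2 built-in cmp(); on Python 3 a two-line shim restores it:

def cmp(a, b):  # replacement for the built-in removed in Python 3
    return (a > b) - (a < b)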
finklabs/korg | korg/korg.py | parse_lines | def parse_lines(log_parsers, fileinp):
"""parse lines from the fileinput and send them to the log_parsers"""
while 1:
logentry = fileinp.readline()
if not logentry:
break
elif not logentry.rstrip():
continue # skip newlines
processed = False
for lp in log_parsers:
if lp.grok(logentry):
processed = True
if not processed:
# error: none of the logparsers worked on the line
logger = logging.getLogger('logparser')
logger.warning(
#'Could not parse line %s, in file %s >>>%s<<<',
#fileinp.lineno(), fileinp.filename(), line.rstrip())
'Could not parse line >>>%s<<<', logentry.rstrip())
print('Could not parse line >>>%s<<<' % logentry.rstrip()) | python | def parse_lines(log_parsers, fileinp):
"""parse lines from the fileinput and send them to the log_parsers"""
while 1:
logentry = fileinp.readline()
if not logentry:
break
elif not logentry.rstrip():
continue # skip newlines
processed = False
for lp in log_parsers:
if lp.grok(logentry):
processed = True
if not processed:
# error: none of the logparsers worked on the line
logger = logging.getLogger('logparser')
logger.warning(
#'Could not parse line %s, in file %s >>>%s<<<',
#fileinp.lineno(), fileinp.filename(), line.rstrip())
'Could not parse line >>>%s<<<', logentry.rstrip())
print('Could not parse line >>>%s<<<' % logentry.rstrip()) | [
"def",
"parse_lines",
"(",
"log_parsers",
",",
"fileinp",
")",
":",
"while",
"1",
":",
"logentry",
"=",
"fileinp",
".",
"readline",
"(",
")",
"if",
"not",
"logentry",
":",
"break",
"elif",
"not",
"logentry",
".",
"rstrip",
"(",
")",
":",
"continue",
"# skip newlines",
"processed",
"=",
"False",
"for",
"lp",
"in",
"log_parsers",
":",
"if",
"lp",
".",
"grok",
"(",
"logentry",
")",
":",
"processed",
"=",
"True",
"if",
"not",
"processed",
":",
"# error: none of the logparsers worked on the line",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'logparser'",
")",
"logger",
".",
"warning",
"(",
"#'Could not parse line %s, in file %s >>>%s<<<',",
"#fileinp.lineno(), fileinp.filename(), line.rstrip())",
"'Could not parse line >>>%s<<<'",
",",
"logentry",
".",
"rstrip",
"(",
")",
")",
"print",
"(",
"'Could not parse line >>>%s<<<'",
"%",
"logentry",
".",
"rstrip",
"(",
")",
")"
] | parse lines from the fileinput and send them to the log_parsers | [
"parse",
"lines",
"from",
"the",
"fileinput",
"and",
"send",
"them",
"to",
"the",
"log_parsers"
] | e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f | https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/korg.py#L24-L44 | train |
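Usage sketch: any object exposing grok(line) -> bool works as a parser; EchoParser is a hypothetical stand-in and the import path is inferred from the record's path field:

import fileinput
from korg.korg import parse_lines

class EchoParser:
    def grok(self, line):
        print("parsed:", line.rstrip())
        return True

parse_lines([EchoParser()], fileinput.input())  # reads the given files or stdin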
NaPs/Kolekto | kolekto/profiles/__init__.py | Profile.load_commands | def load_commands(self, parser):
""" Load commands of this profile.
:param parser: argparse parser on which to add commands
"""
entrypoints = self._get_entrypoints()
already_loaded = set()
for entrypoint in entrypoints:
if entrypoint.name not in already_loaded:
command_class = entrypoint.load()
command_class(entrypoint.name, self, parser).prepare()
already_loaded.add(entrypoint.name) | python | def load_commands(self, parser):
""" Load commands of this profile.
:param parser: argparse parser on which to add commands
"""
entrypoints = self._get_entrypoints()
already_loaded = set()
for entrypoint in entrypoints:
if entrypoint.name not in already_loaded:
command_class = entrypoint.load()
command_class(entrypoint.name, self, parser).prepare()
already_loaded.add(entrypoint.name) | [
"def",
"load_commands",
"(",
"self",
",",
"parser",
")",
":",
"entrypoints",
"=",
"self",
".",
"_get_entrypoints",
"(",
")",
"already_loaded",
"=",
"set",
"(",
")",
"for",
"entrypoint",
"in",
"entrypoints",
":",
"if",
"entrypoint",
".",
"name",
"not",
"in",
"already_loaded",
":",
"command_class",
"=",
"entrypoint",
".",
"load",
"(",
")",
"command_class",
"(",
"entrypoint",
".",
"name",
",",
"self",
",",
"parser",
")",
".",
"prepare",
"(",
")",
"already_loaded",
".",
"add",
"(",
"entrypoint",
".",
"name",
")"
] | Load commands of this profile.
:param parser: argparse parser on which to add commands | [
"Load",
"commands",
"of",
"this",
"profile",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/profiles/__init__.py#L19-L32 | train |
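load_commands() discovers commands through setuptools entry points; a hedged sketch of the registration side, where the group name 'kolekto.commands' and the plugin module are assumptions:

from setuptools import setup  # setup.py of a hypothetical plugin package

setup(
    name='kolekto-myplugin',
    entry_points={
        'kolekto.commands': ['show = myplugin.commands:Show'],
    },
)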