Dataset column schema (reconstructed from the flattened viewer header):

| column | type | notes |
|---|---|---|
| repo | string | lengths 7–55 |
| path | string | lengths 4–127 |
| func_name | string | lengths 1–88 |
| original_string | string | lengths 75–19.8k (verbatim copy of `code` in every record) |
| language | string | 1 class: `python` |
| code | string | lengths 75–19.8k |
| code_tokens | list | tokenization of `code` |
| docstring | string | lengths 3–17.3k |
| docstring_tokens | list | tokenization of `docstring` |
| sha | string | length 40 |
| url | string | lengths 87–242 |
| partition | string | 1 class: `train` |
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulationCell._tensors | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L157-L161

```python
def _tensors(cls, fluents: Sequence[FluentPair]) -> Iterable[tf.Tensor]:
    '''Yields the `fluents`' tensors.'''
    for _, fluent in fluents:
        tensor = cls._output_size(fluent.tensor)
        yield tensor
```
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulationCell._dtype | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L164-L168

```python
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor:
    '''Converts `tensor` to tf.float32 datatype if needed.'''
    if tensor.dtype != tf.float32:
        tensor = tf.cast(tensor, tf.float32)
    return tensor
```
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulationCell._output | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L171-L173

```python
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    '''Returns output tensors for `fluents`.'''
    return tuple(cls._dtype(t) for t in cls._tensors(fluents))
```
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulator.output_size | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L214-L216

```python
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    '''Returns the simulation output size.'''
    return self._cell.output_size
```
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulator.timesteps | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L218-L224

```python
def timesteps(self, horizon: int) -> tf.Tensor:
    '''Returns the input tensor for the given `horizon`.'''
    start, limit, delta = horizon - 1, -1, -1
    timesteps_range = tf.range(start, limit, delta, dtype=tf.float32)
    timesteps_range = tf.expand_dims(timesteps_range, -1)
    batch_timesteps = tf.stack([timesteps_range] * self.batch_size)
    return batch_timesteps
```
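The function builds a per-batch countdown from `horizon - 1` to `0`. A minimal NumPy sketch of the same shape logic, as an illustrative analogue rather than the library's code:

```python
import numpy as np

def timesteps_np(horizon, batch_size):
    # Countdown horizon-1, ..., 0, mirroring tf.range(start, limit, delta)
    rng = np.arange(horizon - 1, -1, -1, dtype=np.float32)  # shape: (horizon,)
    rng = rng[:, np.newaxis]                                # shape: (horizon, 1)
    return np.stack([rng] * batch_size)                     # shape: (batch_size, horizon, 1)

batch = timesteps_np(horizon=3, batch_size=2)
print(batch.shape)     # (2, 3, 1)
print(batch[0, :, 0])  # [2. 1. 0.]
```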
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulator.trajectory | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L226-L272

```python
def trajectory(self,
               horizon: int,
               initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
    '''Returns the ops for the trajectory generation with given `horizon`
    and `initial_state`.

    The simulation returns states, actions and interms as a
    sequence of tensors (i.e., all representations are factored).
    The reward is a batch-sized tensor.
    The trajectory output is a tuple: (initial_state, states, actions, interms, rewards).
    If `initial_state` is None, the compiler's default initial state is used.

    Note:
        All tensors have shape (batch_size, horizon, fluent_shape),
        except the initial state, which has shape (batch_size, fluent_shape).

    Args:
        horizon (int): The number of simulation timesteps.
        initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.

    Returns:
        Tuple[StateTensor, StatesTensor, ActionsTensor, IntermsTensor, tf.Tensor]: Trajectory output tuple.
    '''
    if initial_state is None:
        initial_state = self._cell.initial_state()

    with self.graph.as_default():
        self.inputs = self.timesteps(horizon)
        outputs, _ = tf.nn.dynamic_rnn(
            self._cell,
            self.inputs,
            initial_state=initial_state,
            dtype=tf.float32,
            scope="trajectory")
        states, actions, interms, rewards = outputs

        # fluent types
        state_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.state_range_type)
        states = self._output(states, state_dtype)
        interm_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.interm_range_type)
        interms = self._output(interms, interm_dtype)
        action_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.action_range_type)
        actions = self._output(actions, action_dtype)

    outputs = (initial_state, states, actions, interms, rewards)
    return outputs
```
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulator.run | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L274-L320

```python
def run(self,
        horizon: int,
        initial_state: Optional[StateTensor] = None) -> SimulationOutput:
    '''Builds the MDP graph and simulates the trajectories in batch
    for the given `horizon`. Returns the non-fluents, states, actions, interms
    and rewards. Fluents and non-fluents are returned in factored form.

    Note:
        All output arrays have shape (batch_size, horizon, fluent_shape),
        except the initial state, which has shape (batch_size, fluent_shape).

    Args:
        horizon (int): The number of timesteps in the simulation.
        initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.

    Returns:
        Tuple[NonFluentsArray, StatesArray, ActionsArray, IntermsArray, np.array]: Simulation output tuple.
    '''
    trajectory = self.trajectory(horizon, initial_state)

    with tf.Session(graph=self.graph) as sess:
        sess.run(tf.global_variables_initializer())
        non_fluents = sess.run(self._non_fluents)
        initial_state, states, actions, interms, rewards = sess.run(trajectory)

    # non-fluents
    non_fluent_ordering = self._cell._compiler.rddl.domain.non_fluent_ordering
    non_fluents = tuple(zip(non_fluent_ordering, non_fluents))

    # states
    state_fluent_ordering = self._cell._compiler.rddl.domain.state_fluent_ordering
    states = tuple(zip(state_fluent_ordering, states))

    # interms
    interm_fluent_ordering = self._cell._compiler.rddl.domain.interm_fluent_ordering
    interms = tuple(zip(interm_fluent_ordering, interms))

    # actions
    action_fluent_ordering = self._cell._compiler.rddl.domain.action_fluent_ordering
    actions = tuple(zip(action_fluent_ordering, actions))

    # rewards
    rewards = np.squeeze(rewards)

    outputs = (non_fluents, initial_state, states, actions, interms, rewards)
    return outputs
```
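A hypothetical end-to-end usage sketch. The simulator's constructor arguments (`compiler`, `policy`, `batch_size`) are assumptions and are not shown in this record:

```python
# Sketch only: `compiler` and `policy` construction is assumed elsewhere.
simulator = PolicySimulator(compiler, policy, batch_size=64)
non_fluents, initial_state, states, actions, interms, rewards = simulator.run(horizon=40)

# `states` is a tuple of (fluent_name, np.array) pairs in factored form.
for name, values in states:
    print(name, values.shape)  # (batch_size, horizon, *fluent_shape)
```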
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/policy_simulator.py | PolicySimulator._output | python | train
sha: d7102a0ad37d179dbb23141640254ea383d3b43f
url: https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L323-L333

```python
def _output(cls,
            tensors: Sequence[tf.Tensor],
            dtypes: Sequence[tf.DType]) -> Sequence[tf.Tensor]:
    '''Converts `tensors` to the corresponding `dtypes`.'''
    outputs = []
    for tensor, dtype in zip(tensors, dtypes):
        tensor = tensor[0]
        if tensor.dtype != dtype:
            tensor = tf.cast(tensor, dtype)
        outputs.append(tensor)
    return tuple(outputs)
```
pgxcentre/geneparse | geneparse/readers/impute2.py | Impute2Reader._get_biallelic_variant | python | train
sha: f698f9708af4c7962d384a70a5a14006b1cb7108
url: https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/impute2.py#L222-L239

```python
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
    """Creates a bi-allelic variant."""
    info = info.iloc[0, :]
    assert not info.multiallelic

    # Seeking and parsing the file
    self._impute2_file.seek(info.seek)
    genotypes = self._parse_impute2_line(self._impute2_file.readline())

    variant_alleles = variant._encode_alleles([
        genotypes.reference, genotypes.coded,
    ])
    if (_check_alleles and variant_alleles != variant.alleles):
        # Variant with requested alleles is unavailable.
        logging.variant_not_found(variant)
        return []

    return [genotypes]
```
pgxcentre/geneparse | geneparse/readers/impute2.py | Impute2Reader._fix_genotypes_object | python | train
sha: f698f9708af4c7962d384a70a5a14006b1cb7108
url: https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/impute2.py#L394-L412

```python
def _fix_genotypes_object(self, genotypes, variant_info):
    """Fixes a genotypes object (variant name, multi-allelic value)."""
    # Checking the name (if there were duplications)
    if self.has_index and variant_info.name != genotypes.variant.name:
        if not variant_info.name.startswith(genotypes.variant.name):
            raise ValueError("Index file not synced with IMPUTE2 file")
        genotypes.variant.name = variant_info.name

    # Trying to set multi-allelic information
    if self.has_index and self._index_has_location:
        # Location was in the index, so we can automatically set the
        # multi-allelic state of the genotypes
        genotypes.multiallelic = variant_info.multiallelic
    else:
        # Location was not in the index, so we check one marker before and
        # after the one we found
        logging.warning("Multiallelic variants are not detected on "
                        "unindexed files.")
```
pgxcentre/geneparse | geneparse/readers/plink.py | PlinkReader._normalize_missing | python | train
sha: f698f9708af4c7962d384a70a5a14006b1cb7108
url: https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/readers/plink.py#L276-L280

```python
def _normalize_missing(g):
    """Normalize a plink genotype vector."""
    g = g.astype(float)
    g[g == -1.0] = np.nan
    return g
```
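A self-contained sketch of the same normalization, with the helper's logic inlined for illustration (plink encodes missing genotype calls as -1):

```python
import numpy as np

def normalize_missing(g):
    # Same logic as PlinkReader._normalize_missing above
    g = g.astype(float)
    g[g == -1.0] = np.nan
    return g

g = np.array([0, 1, 2, -1, 2])
print(normalize_missing(g))  # [ 0.  1.  2. nan  2.]
```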
255BITS/hyperchamber | examples/shared/cifar_utils.py | maybe_download_and_extract | python | train
sha: 4d5774bde9ea6ce1113f77a069ffc605148482b8
url: https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/cifar_utils.py#L73-L89

```python
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website."""
    dest_directory = "/tmp/cifar"
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
```
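The `_progress` closure is a standard `urlretrieve` reporthook, called with the block count, block size, and total size. A minimal standalone sketch of the same pattern; the URL below is a placeholder, not the CIFAR mirror:

```python
import sys
import urllib.request

def make_progress_hook(label):
    def _progress(count, block_size, total_size):
        # Cap at 100% in case the final block overshoots total_size
        pct = min(float(count * block_size) / float(total_size) * 100.0, 100.0)
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (label, pct))
        sys.stdout.flush()
    return _progress

# Example call (placeholder URL):
# urllib.request.urlretrieve('https://example.com/data.tar.gz',
#                            '/tmp/data.tar.gz',
#                            make_progress_hook('data.tar.gz'))
```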
255BITS/hyperchamber | examples/shared/cifar_utils.py | plot | python | train
sha: 4d5774bde9ea6ce1113f77a069ffc605148482b8
url: https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/cifar_utils.py#L91-L95

```python
def plot(config, image, file):
    """ Plot a single CIFAR image."""
    image = np.squeeze(image)
    print(file, image.shape)
    imsave(file, image)
```
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | _get_seqtype_from_ext | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L28-L55

```python
def _get_seqtype_from_ext(handle):
    '''Predict the filetype from a handle's name'''
    if isinstance(handle, basestring):
        name = handle
    elif hasattr(handle, 'filename'):
        name = handle.filename
    elif hasattr(handle, 'name'):
        name = handle.name
    else:
        raise ValueError("Unknown datatype for handle!")

    modifier = ''
    dummy, ext = path.splitext(name.lower())
    if ext == ".gz":
        modifier = 'gz-'
        dummy, ext = path.splitext(dummy)
    if not ext:
        ext = "." + dummy

    if ext in (".gbk", ".gb", ".genbank", ".gbff"):
        return modifier + "genbank"
    elif ext in (".embl", ".emb"):
        return modifier + "embl"
    elif ext in (".fa", ".fasta", ".fna", ".faa", ".fas"):
        return modifier + "fasta"
    else:
        raise ValueError("Unknown file format '%s'." % ext)
```
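A quick sketch of the extension-to-format mapping, assuming the function is imported from `helperlibs.bio.seqio`; a `.gz` suffix is peeled off first and prepends the `gz-` modifier:

```python
print(_get_seqtype_from_ext("genome.gbk"))       # 'genbank'
print(_get_seqtype_from_ext("proteins.faa"))     # 'fasta'
print(_get_seqtype_from_ext("records.embl.gz"))  # 'gz-embl'
print(_get_seqtype_from_ext("reads.fasta.gz"))   # 'gz-fasta'
```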
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | _guess_seqtype_from_file | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L58-L84

```python
def _guess_seqtype_from_file(handle):
    "Guess the sequence type from the file's contents"
    if isinstance(handle, basestring):
        handle = StringIO(handle)

    for line in handle:
        if not line.strip():
            continue
        if line.lstrip().split()[0] in ('LOCUS', 'FEATURES', 'source', 'CDS',
                                        'gene'):
            return 'genbank'
        if len(line) > 2 and line[:3] in ('ID ', 'FT '):
            return 'embl'
        if line.startswith('>'):
            return 'fasta'

    handle.seek(0)
    import string
    from Bio.Data import IUPACData as iupac
    all_input_letters = set(handle.read().lower())
    all_valid = set(string.digits)
    all_valid.update(set(iupac.protein_letters.lower()))
    all_valid.update(set(iupac.unambiguous_dna_letters.lower()))
    all_valid.update(set('- \n'))
    if all_valid.issuperset(all_input_letters):
        return 'fasta'
    raise ValueError("Failed to guess format for input")
```
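A sketch of the expected behavior on small inputs, assuming the module's `basestring` shim is in place so plain strings are accepted and wrapped in a `StringIO`:

```python
print(_guess_seqtype_from_file(">seq1\nACGT\n"))           # 'fasta' (header line)
print(_guess_seqtype_from_file("LOCUS       ABC123\n//"))  # 'genbank' (first token)
print(_guess_seqtype_from_file("acgt-acgt\n"))             # 'fasta' (valid residue letters only)
```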
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | _unzip_handle | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L87-L93

```python
def _unzip_handle(handle):
    """Transparently unzip the file handle"""
    if isinstance(handle, basestring):
        handle = _gzip_open_filename(handle)
    else:
        handle = _gzip_open_handle(handle)
    return handle
```
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | sanity_check_insdcio | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L117-L146

```python
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    """Sanity check for insdcio style files"""
    found_id = False
    found_end_marker = False

    for line in handle:
        line = line.strip()
        if not line:
            continue
        if line.startswith(id_marker):
            found_id = True
            break
        if line.startswith('//'):
            found_end_marker = True
            break

    handle.seek(0)

    # We found an ID, file looks good.
    if found_id:
        return handle

    # If there's no ID and no end marker, just give up.
    if not found_end_marker:
        return handle

    # If we found an end marker but no ID, fake one.
    new_handle = StringIO()
    new_handle.write("%s\n" % fake_id_line)
    new_handle.write(handle.read())
    new_handle.seek(0)
    return new_handle
```
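A sketch of the repair path: a record body with an end marker but no ID line gets a fake ID prepended. The fake ID content here is illustrative, not a spec-complete EMBL header:

```python
from io import StringIO

broken = StringIO("FT   source    1..10\n//\n")
fixed = sanity_check_insdcio(broken, id_marker="ID", fake_id_line="ID   DUMMY")
print(fixed.readline())  # 'ID   DUMMY\n'
```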
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | sanity_check_fasta | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L163-L181

```python
def sanity_check_fasta(handle):
    """Sanity check FASTA files."""
    header_found = False

    for line in handle:
        if line.startswith('>'):
            header_found = True
            break
    handle.seek(0)

    if header_found:
        return handle

    fake_header_line = ">DUMMY"
    new_handle = StringIO()
    new_handle.write("%s\n" % fake_header_line)
    new_handle.write(handle.read())
    new_handle.seek(0)
    return new_handle
```
kblin/bioinf-helperlibs | helperlibs/bio/seqio.py | parse | python | train
sha: 3a732d62b4b3cc42675631db886ba534672cb134
url: https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/seqio.py#L184-L204

```python
def parse(handle, seqtype=None, robust=False):
    '''Wrap SeqIO.parse'''
    if seqtype is None:
        seqtype = _get_seqtype_from_ext(handle)

    if seqtype.startswith('gz-'):
        handle = _unzip_handle(handle)
        seqtype = seqtype[3:]

    # False positive from pylint, both handles are fileobj-like
    # pylint: disable=redefined-variable-type
    if robust:
        if seqtype == "embl":
            handle = sanity_check_embl(handle)
        elif seqtype == "genbank":
            handle = sanity_check_genbank(handle)
        elif seqtype == "fasta":
            handle = sanity_check_fasta(handle)
    # pylint: enable=redefined-variable-type

    return SeqIO.parse(handle, seqtype)
```
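A usage sketch; the file paths are placeholders. The format is inferred from the extension unless `seqtype` is given, and `robust=True` routes the handle through the sanity checks above:

```python
for record in parse("genome.gbk"):  # format inferred as 'genbank'
    print(record.id, len(record.seq))

# Header-less FASTA input gets a '>DUMMY' header injected before parsing:
records = list(parse("reads.fa", robust=True))
```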
Nic30/hwtGraph | hwtGraph/elk/containers/constants.py | PortConstraints.isOrderFixed | python | train
sha: 6b7d4fdd759f263a0fdd2736f02f123e44e4354f
url: https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/containers/constants.py#L98-L106

```python
def isOrderFixed(self):
    """
    Returns whether the order of ports is fixed.

    @return true if the order of ports is fixed
    """
    return (self == PortConstraints.FIXED_ORDER
            or self == PortConstraints.FIXED_RATIO
            or self == PortConstraints.FIXED_POS)
```
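The chained equality checks could equivalently be written as a membership test; a sketch of the same predicate, assuming standard enum comparison semantics:

```python
def isOrderFixed(self):
    """Returns whether the order of ports is fixed."""
    return self in (PortConstraints.FIXED_ORDER,
                    PortConstraints.FIXED_RATIO,
                    PortConstraints.FIXED_POS)
```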
testedminds/sand | sand/graph.py | _dicts_to_columns | python | train
sha: 234f0eedb0742920cdf26da9bc84bf3f863a2f02
url: https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L25-L43

```python
def _dicts_to_columns(dicts):
    """
    Given a List of Dictionaries with uniform keys, returns a single Dictionary
    with keys holding a List of values matching the key in the original List.

    [{'name': 'Field Museum', 'location': 'Chicago'},
     {'name': 'Epcot', 'location': 'Orlando'}]
    =>
    {'name': ['Field Museum', 'Epcot'],
     'location': ['Chicago', 'Orlando']}
    """
    keys = dicts[0].keys()
    result = dict((k, []) for k in keys)
    for d in dicts:
        for k, v in d.items():
            result[k] += [v]
    return result
```
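A runnable version of the docstring's example, using the function directly:

```python
museums = [{'name': 'Field Museum', 'location': 'Chicago'},
           {'name': 'Epcot', 'location': 'Orlando'}]
print(_dicts_to_columns(museums))
# {'name': ['Field Museum', 'Epcot'], 'location': ['Chicago', 'Orlando']}
```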
testedminds/sand | sand/graph.py | from_vertices_and_edges | python | train
sha: 234f0eedb0742920cdf26da9bc84bf3f863a2f02
url: https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L46-L84

```python
def from_vertices_and_edges(vertices, edges, vertex_name_key='name', vertex_id_key='id',
                            edge_foreign_keys=('source', 'target'), directed=True):
    """
    This representation assumes that vertices and edges are encoded in
    two lists, each list containing a Python dict for each vertex and
    each edge, respectively. A distinguished element of the vertex dicts
    contains a vertex ID which is used in the edge dicts to refer to
    source and target vertices. All the remaining elements of the dicts
    are considered vertex and edge attributes.

    @param vertices: a list of dicts for the vertices.
    @param edges: a list of dicts for the edges.
    @param vertex_name_key: the name of the distinguished key in the dicts
      in the vertex data source that contains the vertex names. Will also be
      used as vertex label.
    @param vertex_id_key: the name of the distinguished key in the dicts
      in the vertex data source that contains a unique identifier for the vertex.
    @param edge_foreign_keys: the name of the attributes in the dicts in C{edges}
      that contain the source and target vertex names.
    @return: IGraph instance with integers for vertex ids, edge sources, and edge targets.
    """
    vertex_data = _dicts_to_columns(vertices)
    edge_data = _dicts_to_columns(edges)
    n = len(vertices)
    vertex_index = dict(zip(vertex_data[vertex_id_key], range(n)))

    # Iterate over `edges` to create `edge_list`, where every list item is a pair of integers.
    edge_list = list(map(lambda source, target: (vertex_index[source], vertex_index[target]),
                         edge_data[edge_foreign_keys[0]],
                         edge_data[edge_foreign_keys[1]]))

    g = IGraph(n=n, edges=edge_list, directed=directed, vertex_attrs=vertex_data, edge_attrs=edge_data)
    g.vs['name'] = g.vs[vertex_name_key]
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs[vertex_name_key]
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
```
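A minimal usage sketch with two vertices and one edge; the dict keys match the defaults (`id`, `name`, `source`, `target`):

```python
vertices = [{'id': 'a', 'name': 'Alice'},
            {'id': 'b', 'name': 'Bob'}]
edges = [{'source': 'a', 'target': 'b'}]

g = from_vertices_and_edges(vertices, edges)
print(g.vcount(), g.ecount())  # 2 1
print(g.vs['label'])           # ['Alice', 'Bob']
```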
testedminds/sand | sand/graph.py | from_edges | def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
"""
Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph.
"""
raw = list(map(lambda x: [x[source_key], x[target_key], int(x[weight_key])], edges))
g = IGraph.TupleList(raw, weights=True, directed=directed)
g.vs['indegree'] = g.degree(mode="in")
g.vs['outdegree'] = g.degree(mode="out")
g.vs['label'] = g.vs['name']
if 'group' not in g.vs.attributes():
g.vs['group'] = labels_to_groups(g.vs['label'])
return g | python | def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
"""
Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph.
"""
raw = list(map(lambda x: [x[source_key], x[target_key], int(x[weight_key])], edges))
g = IGraph.TupleList(raw, weights=True, directed=directed)
g.vs['indegree'] = g.degree(mode="in")
g.vs['outdegree'] = g.degree(mode="out")
g.vs['label'] = g.vs['name']
if 'group' not in g.vs.attributes():
g.vs['group'] = labels_to_groups(g.vs['label'])
return g | [
"def",
"from_edges",
"(",
"edges",
",",
"source_key",
"=",
"'source'",
",",
"target_key",
"=",
"'target'",
",",
"weight_key",
"=",
"'weight'",
",",
"directed",
"=",
"True",
")",
":",
"raw",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"[",
"x",
"[",
"source_key",
"]",
",",
"x",
"[",
"target_key",
"]",
",",
"int",
"(",
"x",
"[",
"weight_key",
"]",
")",
"]",
",",
"edges",
")",
")",
"g",
"=",
"IGraph",
".",
"TupleList",
"(",
"raw",
",",
"weights",
"=",
"True",
",",
"directed",
"=",
"directed",
")",
"g",
".",
"vs",
"[",
"'indegree'",
"]",
"=",
"g",
".",
"degree",
"(",
"mode",
"=",
"\"in\"",
")",
"g",
".",
"vs",
"[",
"'outdegree'",
"]",
"=",
"g",
".",
"degree",
"(",
"mode",
"=",
"\"out\"",
")",
"g",
".",
"vs",
"[",
"'label'",
"]",
"=",
"g",
".",
"vs",
"[",
"'name'",
"]",
"if",
"'group'",
"not",
"in",
"g",
".",
"vs",
".",
"attributes",
"(",
")",
":",
"g",
".",
"vs",
"[",
"'group'",
"]",
"=",
"labels_to_groups",
"(",
"g",
".",
"vs",
"[",
"'label'",
"]",
")",
"return",
"g"
]
| Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph. | [
"Given",
"a",
"List",
"of",
"Dictionaries",
"with",
"source",
"target",
"and",
"weight",
"attributes",
"return",
"a",
"weighted",
"directed",
"graph",
"."
]
| 234f0eedb0742920cdf26da9bc84bf3f863a2f02 | https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/graph.py#L87-L98 | train |
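A minimal usage sketch for from_edges; it assumes python-igraph is installed and that the function is importable from sand.graph as the path above suggests, with invented edge dicts:

    from sand.graph import from_edges

    edges = [
        {"source": "a", "target": "b", "weight": "3"},  # weights are cast with int()
        {"source": "b", "target": "c", "weight": "1"},
        {"source": "a", "target": "c", "weight": "2"},
    ]
    g = from_edges(edges)
    print(g.vs["label"])      # ['a', 'b', 'c'] -- vertex names become labels
    print(g.vs["outdegree"])  # [2, 1, 0] for this directed graph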
pgxcentre/geneparse | geneparse/utils.py | flip_alleles | def flip_alleles(genotypes):
"""Flip the alleles of an Genotypes instance."""
warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
genotypes.reference, genotypes.coded = (genotypes.coded,
genotypes.reference)
genotypes.genotypes = 2 - genotypes.genotypes
return genotypes | python | def flip_alleles(genotypes):
"""Flip the alleles of an Genotypes instance."""
warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
genotypes.reference, genotypes.coded = (genotypes.coded,
genotypes.reference)
genotypes.genotypes = 2 - genotypes.genotypes
return genotypes | [
"def",
"flip_alleles",
"(",
"genotypes",
")",
":",
"warnings",
".",
"warn",
"(",
"\"deprecated: use 'Genotypes.flip_coded'\"",
",",
"DeprecationWarning",
")",
"genotypes",
".",
"reference",
",",
"genotypes",
".",
"coded",
"=",
"(",
"genotypes",
".",
"coded",
",",
"genotypes",
".",
"reference",
")",
"genotypes",
".",
"genotypes",
"=",
"2",
"-",
"genotypes",
".",
"genotypes",
"return",
"genotypes"
]
| Flip the alleles of a Genotypes instance. | [
"Flip",
"the",
"alleles",
"of",
"an",
"Genotypes",
"instance",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L45-L51 | train |
pgxcentre/geneparse | geneparse/utils.py | code_minor | def code_minor(genotypes):
"""Encode the genotypes with respect to the minor allele.
This ensures that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the genotype
value is the number of minor alleles for an individual.
"""
warnings.warn("deprecated: use 'Genotypes.code_minor'", DeprecationWarning)
_, minor_coded = maf(genotypes)
if not minor_coded:
return flip_alleles(genotypes)
return genotypes | python | def code_minor(genotypes):
"""Encode the genotypes with respect to the minor allele.
This ensures that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the genotype
value is the number of minor alleles for an individual.
"""
warnings.warn("deprecated: use 'Genotypes.code_minor'", DeprecationWarning)
_, minor_coded = maf(genotypes)
if not minor_coded:
return flip_alleles(genotypes)
return genotypes | [
"def",
"code_minor",
"(",
"genotypes",
")",
":",
"warnings",
".",
"warn",
"(",
"\"deprecated: use 'Genotypes.code_minor'\"",
",",
"DeprecationWarning",
")",
"_",
",",
"minor_coded",
"=",
"maf",
"(",
"genotypes",
")",
"if",
"not",
"minor_coded",
":",
"return",
"flip_alleles",
"(",
"genotypes",
")",
"return",
"genotypes"
]
| Encode the genotypes with respect to the minor allele.
This ensures that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the genotype
value is the number of minor alleles for an individual. | [
"Encode",
"the",
"genotypes",
"with",
"respect",
"to",
"the",
"minor",
"allele",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L54-L69 | train |
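A self-contained walk-through of code_minor using a minimal stand-in instead of a real geneparse Genotypes instance; the stand-in only carries the three attributes the deprecated helpers touch:

    import numpy as np

    class _G:
        reference, coded = "A", "G"
        genotypes = np.array([2.0, 2.0, 1.0, 2.0])  # coded-allele frequency 7/8

    g = code_minor(_G())         # emits a DeprecationWarning
    print(g.reference, g.coded)  # G A  -- the alleles were swapped
    print(g.genotypes)           # [0. 0. 1. 0.] -- dosages now count the minor allele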
pgxcentre/geneparse | geneparse/utils.py | maf | def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
return maf, True | python | def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
return maf, True | [
"def",
"maf",
"(",
"genotypes",
")",
":",
"warnings",
".",
"warn",
"(",
"\"deprecated: use 'Genotypes.maf'\"",
",",
"DeprecationWarning",
")",
"g",
"=",
"genotypes",
".",
"genotypes",
"maf",
"=",
"np",
".",
"nansum",
"(",
"g",
")",
"/",
"(",
"2",
"*",
"np",
".",
"sum",
"(",
"~",
"np",
".",
"isnan",
"(",
"g",
")",
")",
")",
"if",
"maf",
">",
"0.5",
":",
"maf",
"=",
"1",
"-",
"maf",
"return",
"maf",
",",
"False",
"return",
"maf",
",",
"True"
]
| Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele. | [
"Computes",
"the",
"MAF",
"and",
"returns",
"a",
"boolean",
"indicating",
"if",
"the",
"minor",
"allele",
"is",
"currently",
"the",
"coded",
"allele",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L72-L84 | train |
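The arithmetic in maf, checked with the same kind of stand-in and one missing value:

    import numpy as np

    class _G:
        genotypes = np.array([0.0, 1.0, 2.0, 2.0, np.nan])

    freq, minor_is_coded = maf(_G())  # emits a DeprecationWarning
    # nansum = 5 over 4 non-missing samples: 5 / 8 = 0.625 > 0.5, so the
    # coded allele is the major one and the frequency is folded.
    assert freq == 0.375 and minor_is_coded is False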
pgxcentre/geneparse | geneparse/utils.py | genotype_to_df | def genotype_to_df(g, samples, as_string=False):
"""Convert a genotype object to a pandas dataframe.
By default, the encoded values are stored, but the as_string argument can
be used to represent them as characters (alleles) instead.
"""
name = g.variant.name if g.variant.name else "genotypes"
df = pd.DataFrame(g.genotypes, index=samples, columns=[name])
if as_string:
df["alleles"] = None
hard_calls = df[name].round()
df.loc[hard_calls == 0, "alleles"] = "{0}/{0}".format(g.reference)
df.loc[hard_calls == 1, "alleles"] = "{0}/{1}".format(g.reference,
g.coded)
df.loc[hard_calls == 2, "alleles"] = "{0}/{0}".format(g.coded)
df = df[["alleles"]]
df.columns = [name]
return df | python | def genotype_to_df(g, samples, as_string=False):
"""Convert a genotype object to a pandas dataframe.
By default, the encoded values are stored, but the as_string argument can
be used to represent them as characters (alleles) instead.
"""
name = g.variant.name if g.variant.name else "genotypes"
df = pd.DataFrame(g.genotypes, index=samples, columns=[name])
if as_string:
df["alleles"] = None
hard_calls = df[name].round()
df.loc[hard_calls == 0, "alleles"] = "{0}/{0}".format(g.reference)
df.loc[hard_calls == 1, "alleles"] = "{0}/{1}".format(g.reference,
g.coded)
df.loc[hard_calls == 2, "alleles"] = "{0}/{0}".format(g.coded)
df = df[["alleles"]]
df.columns = [name]
return df | [
"def",
"genotype_to_df",
"(",
"g",
",",
"samples",
",",
"as_string",
"=",
"False",
")",
":",
"name",
"=",
"g",
".",
"variant",
".",
"name",
"if",
"g",
".",
"variant",
".",
"name",
"else",
"\"genotypes\"",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"g",
".",
"genotypes",
",",
"index",
"=",
"samples",
",",
"columns",
"=",
"[",
"name",
"]",
")",
"if",
"as_string",
":",
"df",
"[",
"\"alleles\"",
"]",
"=",
"None",
"hard_calls",
"=",
"df",
"[",
"name",
"]",
".",
"round",
"(",
")",
"df",
".",
"loc",
"[",
"hard_calls",
"==",
"0",
",",
"\"alleles\"",
"]",
"=",
"\"{0}/{0}\"",
".",
"format",
"(",
"g",
".",
"reference",
")",
"df",
".",
"loc",
"[",
"hard_calls",
"==",
"1",
",",
"\"alleles\"",
"]",
"=",
"\"{0}/{1}\"",
".",
"format",
"(",
"g",
".",
"reference",
",",
"g",
".",
"coded",
")",
"df",
".",
"loc",
"[",
"hard_calls",
"==",
"2",
",",
"\"alleles\"",
"]",
"=",
"\"{0}/{0}\"",
".",
"format",
"(",
"g",
".",
"coded",
")",
"df",
"=",
"df",
"[",
"[",
"\"alleles\"",
"]",
"]",
"df",
".",
"columns",
"=",
"[",
"name",
"]",
"return",
"df"
]
| Convert a genotype object to a pandas dataframe.
By default, the encoded values are stored, but the as_string argument can
be used to represent them as characters (alleles) instead. | [
"Convert",
"a",
"genotype",
"object",
"to",
"a",
"pandas",
"dataframe",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L133-L155 | train |
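A stand-in demonstration of the as_string conversion; real callers pass a geneparse Genotypes object, but only the attributes used above are required:

    import numpy as np

    class _Variant:
        name = "rs42"

    class _G:
        variant = _Variant()
        genotypes = np.array([0.0, 1.0, 2.0])
        reference, coded = "A", "G"

    df = genotype_to_df(_G(), ["s1", "s2", "s3"], as_string=True)
    print(df["rs42"].tolist())  # ['A/A', 'A/G', 'G/G']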
pgxcentre/geneparse | geneparse/utils.py | compute_ld | def compute_ld(cur_geno, other_genotypes, r2=False):
"""Compute LD between a marker and a list of markers.
Args:
cur_geno (Genotypes): The genotypes of the marker.
other_genotypes (list): A list of genotypes.
Returns:
pandas.Series: a series containing the r or r**2 values between cur_geno
and other_genotypes.
Note:
The genotypes will automatically be normalized using (x - mean) / std.
"""
# Normalizing the current genotypes
norm_cur = normalize_genotypes(cur_geno)
# Normalizing and creating the matrix for the other genotypes
norm_others = np.stack(
tuple(normalize_genotypes(g) for g in other_genotypes),
axis=1,
)
# Making sure the size is the same
assert norm_cur.shape[0] == norm_others.shape[0]
# Getting the number of "samples" per marker (taking into account NaN)
n = (
~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) *
~np.isnan(norm_others)
).sum(axis=0)
# Computing r (replacing NaN by 0)
r = pd.Series(
np.dot(
np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n
),
index=[g.variant.name for g in other_genotypes],
name="r2" if r2 else "r",
)
# Checking no "invalid" values (i.e. < -1 or > 1)
r.loc[r > 1] = 1
r.loc[r < -1] = -1
if r2:
return r ** 2
else:
return r | python | def compute_ld(cur_geno, other_genotypes, r2=False):
"""Compute LD between a marker and a list of markers.
Args:
cur_geno (Genotypes): The genotypes of the marker.
other_genotypes (list): A list of genotypes.
Returns:
pandas.Series: a series containing the r or r**2 values between cur_geno
and other_genotypes.
Note:
The genotypes will automatically be normalized using (x - mean) / std.
"""
# Normalizing the current genotypes
norm_cur = normalize_genotypes(cur_geno)
# Normalizing and creating the matrix for the other genotypes
norm_others = np.stack(
tuple(normalize_genotypes(g) for g in other_genotypes),
axis=1,
)
# Making sure the size is the same
assert norm_cur.shape[0] == norm_others.shape[0]
# Getting the number of "samples" per marker (taking into account NaN)
n = (
~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) *
~np.isnan(norm_others)
).sum(axis=0)
# Computing r (replacing NaN by 0)
r = pd.Series(
np.dot(
np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n
),
index=[g.variant.name for g in other_genotypes],
name="r2" if r2 else "r",
)
# Checking no "invalid" values (i.e. < -1 or > 1)
r.loc[r > 1] = 1
r.loc[r < -1] = -1
if r2:
return r ** 2
else:
return r | [
"def",
"compute_ld",
"(",
"cur_geno",
",",
"other_genotypes",
",",
"r2",
"=",
"False",
")",
":",
"# Normalizing the current genotypes",
"norm_cur",
"=",
"normalize_genotypes",
"(",
"cur_geno",
")",
"# Normalizing and creating the matrix for the other genotypes",
"norm_others",
"=",
"np",
".",
"stack",
"(",
"tuple",
"(",
"normalize_genotypes",
"(",
"g",
")",
"for",
"g",
"in",
"other_genotypes",
")",
",",
"axis",
"=",
"1",
",",
")",
"# Making sure the size is the same",
"assert",
"norm_cur",
".",
"shape",
"[",
"0",
"]",
"==",
"norm_others",
".",
"shape",
"[",
"0",
"]",
"# Getting the number of \"samples\" per marker (taking into account NaN)",
"n",
"=",
"(",
"~",
"np",
".",
"isnan",
"(",
"norm_cur",
".",
"reshape",
"(",
"norm_cur",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"*",
"~",
"np",
".",
"isnan",
"(",
"norm_others",
")",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"# Computing r (replacing NaN by 0)",
"r",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"dot",
"(",
"np",
".",
"nan_to_num",
"(",
"norm_cur",
")",
",",
"np",
".",
"nan_to_num",
"(",
"norm_others",
")",
"/",
"n",
")",
",",
"index",
"=",
"[",
"g",
".",
"variant",
".",
"name",
"for",
"g",
"in",
"other_genotypes",
"]",
",",
"name",
"=",
"\"r2\"",
"if",
"r2",
"else",
"\"r\"",
",",
")",
"# Checking no \"invalid\" values (i.e. < -1 or > 1)",
"r",
".",
"loc",
"[",
"r",
">",
"1",
"]",
"=",
"1",
"r",
".",
"loc",
"[",
"r",
"<",
"-",
"1",
"]",
"=",
"-",
"1",
"if",
"r2",
":",
"return",
"r",
"**",
"2",
"else",
":",
"return",
"r"
]
| Compute LD between a marker and a list of markers.
Args:
cur_geno (Genotypes): The genotypes of the marker.
other_genotypes (list): A list of genotypes.
Returns:
pandas.Series: a series containing the r or r**2 values between cur_geno
and other_genotypes.
Note:
The genotypes will automatically be normalized using (x - mean) / std. | [
"Compute",
"LD",
"between",
"a",
"marker",
"and",
"a",
"list",
"of",
"markers",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L158-L207 | train |
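A pure-NumPy check of the identity compute_ld relies on: for mean-centred, std-normalised vectors with no missing values, dot(x, y) / n is exactly the Pearson correlation:

    import numpy as np

    x = np.array([0.0, 1.0, 2.0, 1.0, 0.0])
    y = np.array([0.0, 1.0, 2.0, 2.0, 0.0])
    xn = (x - x.mean()) / x.std()
    yn = (y - y.mean()) / y.std()
    r = np.dot(xn, yn) / len(x)
    assert np.isclose(r, np.corrcoef(x, y)[0, 1])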
pgxcentre/geneparse | geneparse/utils.py | normalize_genotypes | def normalize_genotypes(genotypes):
"""Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes.
"""
genotypes = genotypes.genotypes
return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes) | python | def normalize_genotypes(genotypes):
"""Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes.
"""
genotypes = genotypes.genotypes
return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes) | [
"def",
"normalize_genotypes",
"(",
"genotypes",
")",
":",
"genotypes",
"=",
"genotypes",
".",
"genotypes",
"return",
"(",
"genotypes",
"-",
"np",
".",
"nanmean",
"(",
"genotypes",
")",
")",
"/",
"np",
".",
"nanstd",
"(",
"genotypes",
")"
]
| Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes. | [
"Normalize",
"the",
"genotypes",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/utils.py#L210-L221 | train |
geophysics-ubonn/crtomo_tools | lib/crtomo/interface.py | crmod_interface._get_tdm | def _get_tdm(self, m):
"""For a given model, return a tdMan instance
Parameters
----------
m : ndarray
Model parameters (linear, ohm m)
"""
m = np.atleast_2d(m)
assert len(m.shape) == 2
tdm = crtomo.tdMan(grid=self.grid, tempdir=self.tempdir)
tdm.configs.add_to_configs(self.configs)
pid_mag = tdm.parman.add_data(m[0, :])
tdm.register_magnitude_model(pid_mag)
if m.shape[0] == 2:
pid_pha = tdm.parman.add_data(m[1, :])
else:
pid_pha = tdm.parman.add_data(np.zeros(m.shape[1]))
tdm.register_phase_model(pid_pha)
return tdm | python | def _get_tdm(self, m):
"""For a given model, return a tdMan instance
Parameters
----------
m : ndarray
Model parameters (linear, ohm m)
"""
m = np.atleast_2d(m)
assert len(m.shape) == 2
tdm = crtomo.tdMan(grid=self.grid, tempdir=self.tempdir)
tdm.configs.add_to_configs(self.configs)
pid_mag = tdm.parman.add_data(m[0, :])
tdm.register_magnitude_model(pid_mag)
if m.shape[0] == 2:
pid_pha = tdm.parman.add_data(m[1, :])
else:
pid_pha = tdm.parman.add_data(np.zeros(m.shape[1]))
tdm.register_phase_model(pid_pha)
return tdm | [
"def",
"_get_tdm",
"(",
"self",
",",
"m",
")",
":",
"m",
"=",
"np",
".",
"atleast_2d",
"(",
"m",
")",
"assert",
"len",
"(",
"m",
".",
"shape",
")",
"==",
"2",
"tdm",
"=",
"crtomo",
".",
"tdMan",
"(",
"grid",
"=",
"self",
".",
"grid",
",",
"tempdir",
"=",
"self",
".",
"tempdir",
")",
"tdm",
".",
"configs",
".",
"add_to_configs",
"(",
"self",
".",
"configs",
")",
"pid_mag",
"=",
"tdm",
".",
"parman",
".",
"add_data",
"(",
"m",
"[",
"0",
",",
":",
"]",
")",
"tdm",
".",
"register_magnitude_model",
"(",
"pid_mag",
")",
"if",
"m",
".",
"shape",
"[",
"0",
"]",
"==",
"2",
":",
"pid_pha",
"=",
"tdm",
".",
"parman",
".",
"add_data",
"(",
"m",
"[",
"1",
",",
":",
"]",
")",
"else",
":",
"pid_pha",
"=",
"tdm",
".",
"parman",
".",
"add_data",
"(",
"np",
".",
"zeros",
"(",
"m",
".",
"shape",
"[",
"1",
"]",
")",
")",
"tdm",
".",
"register_phase_model",
"(",
"pid_pha",
")",
"return",
"tdm"
]
| For a given model, return a tdMan instance
Parameters
----------
m : ndarray
Model parameters (linear, ohm m) | [
"For",
"a",
"given",
"model",
"return",
"a",
"tdMan",
"instance"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/interface.py#L43-L63 | train |
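The shape handling in _get_tdm, isolated so it runs without a CRTomo installation: a 1-D magnitude model is promoted to 2-D, and a missing phase row is replaced by zeros:

    import numpy as np

    m = np.atleast_2d(np.array([100.0, 120.0, 90.0]))  # magnitudes only
    mag = m[0, :]
    pha = m[1, :] if m.shape[0] == 2 else np.zeros(m.shape[1])
    print(mag)  # [100. 120.  90.]
    print(pha)  # [0. 0. 0.]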
geophysics-ubonn/crtomo_tools | lib/crtomo/interface.py | crmod_interface.J | def J(self, log_sigma):
"""Return the sensitivity matrix
Parameters
----------
log_sigma : numpy.ndarray
log_e conductivities
"""
m = 1.0 / np.exp(log_sigma)
tdm = self._get_tdm(m)
tdm.model(
sensitivities=True,
# output_directory=stage_dir + 'modeling',
)
measurements = tdm.measurements()
# build up the sensitivity matrix
sens_list = []
for config_nr, cids in sorted(
tdm.assignments['sensitivities'].items()):
sens_list.append(tdm.parman.parsets[cids[0]])
sensitivities_lin = np.array(sens_list)
# now convert to the log-sensitivities relevant for CRTomo and the
# resolution matrix
sensitivities_log = sensitivities_lin
# multiply measurements on first dimension
measurements_rep = np.repeat(
measurements[:, 0, np.newaxis],
sensitivities_lin.shape[1],
axis=1)
# sensitivities_log = sensitivities_log * mfit
# multiply resistivities on second dimension
m_rep = np.repeat(
m[np.newaxis, :], sensitivities_lin.shape[0], axis=0
)
# eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma
factor = - 1 / (m_rep * measurements_rep)
sensitivities_log = factor * sensitivities_lin
# import IPython
# IPython.embed()
return sensitivities_log | python | def J(self, log_sigma):
"""Return the sensitivity matrix
Parameters
----------
log_sigma : numpy.ndarray
log_e conductivities
"""
m = 1.0 / np.exp(log_sigma)
tdm = self._get_tdm(m)
tdm.model(
sensitivities=True,
# output_directory=stage_dir + 'modeling',
)
measurements = tdm.measurements()
# build up the sensitivity matrix
sens_list = []
for config_nr, cids in sorted(
tdm.assignments['sensitivities'].items()):
sens_list.append(tdm.parman.parsets[cids[0]])
sensitivities_lin = np.array(sens_list)
# now convert to the log-sensitivities relevant for CRTomo and the
# resolution matrix
sensitivities_log = sensitivities_lin
# multiply measurements on first dimension
measurements_rep = np.repeat(
measurements[:, 0, np.newaxis],
sensitivities_lin.shape[1],
axis=1)
# sensitivities_log = sensitivities_log * mfit
# multiply resistivities on second dimension
m_rep = np.repeat(
m[np.newaxis, :], sensitivities_lin.shape[0], axis=0
)
# eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma
factor = - 1 / (m_rep * measurements_rep)
sensitivities_log = factor * sensitivities_lin
# import IPython
# IPython.embed()
return sensitivities_log | [
"def",
"J",
"(",
"self",
",",
"log_sigma",
")",
":",
"m",
"=",
"1.0",
"/",
"np",
".",
"exp",
"(",
"log_sigma",
")",
"tdm",
"=",
"self",
".",
"_get_tdm",
"(",
"m",
")",
"tdm",
".",
"model",
"(",
"sensitivities",
"=",
"True",
",",
"# output_directory=stage_dir + 'modeling',",
")",
"measurements",
"=",
"tdm",
".",
"measurements",
"(",
")",
"# build up the sensitivity matrix",
"sens_list",
"=",
"[",
"]",
"for",
"config_nr",
",",
"cids",
"in",
"sorted",
"(",
"tdm",
".",
"assignments",
"[",
"'sensitivities'",
"]",
".",
"items",
"(",
")",
")",
":",
"sens_list",
".",
"append",
"(",
"tdm",
".",
"parman",
".",
"parsets",
"[",
"cids",
"[",
"0",
"]",
"]",
")",
"sensitivities_lin",
"=",
"np",
".",
"array",
"(",
"sens_list",
")",
"# now convert to the log-sensitivities relevant for CRTomo and the",
"# resolution matrix",
"sensitivities_log",
"=",
"sensitivities_lin",
"# multiply measurements on first dimension",
"measurements_rep",
"=",
"np",
".",
"repeat",
"(",
"measurements",
"[",
":",
",",
"0",
",",
"np",
".",
"newaxis",
"]",
",",
"sensitivities_lin",
".",
"shape",
"[",
"1",
"]",
",",
"axis",
"=",
"1",
")",
"# sensitivities_log = sensitivities_log * mfit",
"# multiply resistivities on second dimension",
"m_rep",
"=",
"np",
".",
"repeat",
"(",
"m",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
",",
"sensitivities_lin",
".",
"shape",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"# eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma",
"factor",
"=",
"-",
"1",
"/",
"(",
"m_rep",
"*",
"measurements_rep",
")",
"sensitivities_log",
"=",
"factor",
"*",
"sensitivities_lin",
"# import IPython",
"# IPython.embed()",
"return",
"sensitivities_log"
]
| Return the sensitivity matrix
Parameters
----------
log_sigma : numpy.ndarray
log_e conductivities | [
"Return",
"the",
"sensitivity",
"matrix"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/interface.py#L88-L136 | train |
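A hedged reading of the conversion factor above (the excerpt does not state which variable the raw CRMod sensitivities are taken against): if they are derivatives with respect to conductivity, then multiplying by -1/(rho * V) yields d ln V / d ln rho, which is consistent with eq. 3.41 as cited in the comment. A numeric check of that chain rule:

    import numpy as np

    rho, V = 50.0, 2.0
    dV_dsigma = -3.0                         # arbitrary raw sensitivity value
    dV_drho = dV_dsigma * (-1.0 / rho ** 2)  # chain rule with sigma = 1/rho
    lhs = (rho / V) * dV_drho                # d ln V / d ln rho
    rhs = -dV_dsigma / (rho * V)             # the code's factor applied
    assert np.isclose(lhs, rhs)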
redhat-openstack/python-tripleo-helper | tripleohelper/baremetal.py | BaremetalFactory.set_ironic_uuid | def set_ironic_uuid(self, uuid_list):
"""Map a list of Ironic UUID to BM nodes.
"""
# TODO(Gonéri): ensure we adjust the correct node
i = iter(self.nodes)
for uuid in uuid_list:
node = next(i)
node.uuid = uuid | python | def set_ironic_uuid(self, uuid_list):
"""Map a list of Ironic UUID to BM nodes.
"""
# TODO(Gonéri): ensure we adjust the correct node
i = iter(self.nodes)
for uuid in uuid_list:
node = next(i)
node.uuid = uuid | [
"def",
"set_ironic_uuid",
"(",
"self",
",",
"uuid_list",
")",
":",
"# TODO(Gonéri): ensure we adjust the correct node",
"i",
"=",
"iter",
"(",
"self",
".",
"nodes",
")",
"for",
"uuid",
"in",
"uuid_list",
":",
"node",
"=",
"next",
"(",
"i",
")",
"node",
".",
"uuid",
"=",
"uuid"
]
| Map a list of Ironic UUIDs to BM nodes. | [
"Map",
"a",
"list",
"of",
"Ironic",
"UUID",
"to",
"BM",
"nodes",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/baremetal.py#L89-L96 | train |
althonos/moclo | moclo/moclo/registry/_utils.py | find_resistance | def find_resistance(record):
"""Infer the antibiotics resistance of the given record.
Arguments:
record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.
Raises:
RuntimeError: when there's not exactly one resistance cassette.
"""
for feature in record.features:
labels = set(feature.qualifiers.get("label", []))
cassettes = labels.intersection(_ANTIBIOTICS)
if len(cassettes) > 1:
raise RuntimeError("multiple resistance cassettes detected")
elif len(cassettes) == 1:
return _ANTIBIOTICS.get(cassettes.pop())
raise RuntimeError("could not find the resistance of '{}'".format(record.id)) | python | def find_resistance(record):
"""Infer the antibiotics resistance of the given record.
Arguments:
record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.
Raises:
RuntimeError: when there's not exactly one resistance cassette.
"""
for feature in record.features:
labels = set(feature.qualifiers.get("label", []))
cassettes = labels.intersection(_ANTIBIOTICS)
if len(cassettes) > 1:
raise RuntimeError("multiple resistance cassettes detected")
elif len(cassettes) == 1:
return _ANTIBIOTICS.get(cassettes.pop())
raise RuntimeError("could not find the resistance of '{}'".format(record.id)) | [
"def",
"find_resistance",
"(",
"record",
")",
":",
"for",
"feature",
"in",
"record",
".",
"features",
":",
"labels",
"=",
"set",
"(",
"feature",
".",
"qualifiers",
".",
"get",
"(",
"\"label\"",
",",
"[",
"]",
")",
")",
"cassettes",
"=",
"labels",
".",
"intersection",
"(",
"_ANTIBIOTICS",
")",
"if",
"len",
"(",
"cassettes",
")",
">",
"1",
":",
"raise",
"RuntimeError",
"(",
"\"multiple resistance cassettes detected\"",
")",
"elif",
"len",
"(",
"cassettes",
")",
"==",
"1",
":",
"return",
"_ANTIBIOTICS",
".",
"get",
"(",
"cassettes",
".",
"pop",
"(",
")",
")",
"raise",
"RuntimeError",
"(",
"\"could not find the resistance of '{}'\"",
".",
"format",
"(",
"record",
".",
"id",
")",
")"
]
| Infer the antibiotics resistance of the given record.
Arguments:
record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.
Raises:
RuntimeError: when there's not exactly one resistance cassette. | [
"Infer",
"the",
"antibiotics",
"resistance",
"of",
"the",
"given",
"record",
"."
]
| 28a03748df8a2fa43f0c0c8098ca64d11559434e | https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/registry/_utils.py#L16-L33 | train |
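A hedged usage sketch with Biopython; "KanR" is a made-up label and may not be a key of the module-private _ANTIBIOTICS mapping, which is not shown in this excerpt:

    from Bio.Seq import Seq
    from Bio.SeqFeature import SeqFeature, FeatureLocation
    from Bio.SeqRecord import SeqRecord

    rec = SeqRecord(Seq("ATGC" * 10), id="pTest")
    rec.features.append(
        SeqFeature(FeatureLocation(0, 12), type="CDS",
                   qualifiers={"label": ["KanR"]})
    )
    # find_resistance(rec) returns _ANTIBIOTICS["KanR"] if that label is a
    # known cassette, and raises RuntimeError otherwise.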
rsms/tc | setup.py | shell_cmd | def shell_cmd(args, cwd=None):
'''Returns stdout as a string; raises IOError on failure
'''
if cwd is None:
cwd = os.path.abspath('.')
if not isinstance(args, (list, tuple)):
args = [args]
ps = Popen(args, shell=True, cwd=cwd, stdout=PIPE, stderr=PIPE,
close_fds=True)
stdout, stderr = ps.communicate()
if ps.returncode != 0:
if stderr:
stderr = stderr.strip()
raise IOError('Shell command %s failed (exit status %r): %s' %\
(args, ps.returncode, stderr))
return stdout.strip() | python | def shell_cmd(args, cwd=None):
'''Returns stdout as a string; raises IOError on failure
'''
if cwd is None:
cwd = os.path.abspath('.')
if not isinstance(args, (list, tuple)):
args = [args]
ps = Popen(args, shell=True, cwd=cwd, stdout=PIPE, stderr=PIPE,
close_fds=True)
stdout, stderr = ps.communicate()
if ps.returncode != 0:
if stderr:
stderr = stderr.strip()
raise IOError('Shell command %s failed (exit status %r): %s' %\
(args, ps.returncode, stderr))
return stdout.strip() | [
"def",
"shell_cmd",
"(",
"args",
",",
"cwd",
"=",
"None",
")",
":",
"if",
"cwd",
"is",
"None",
":",
"cwd",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
"if",
"not",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"args",
"=",
"[",
"args",
"]",
"ps",
"=",
"Popen",
"(",
"args",
",",
"shell",
"=",
"True",
",",
"cwd",
"=",
"cwd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"stdout",
",",
"stderr",
"=",
"ps",
".",
"communicate",
"(",
")",
"if",
"ps",
".",
"returncode",
"!=",
"0",
":",
"if",
"stderr",
":",
"stderr",
"=",
"stderr",
".",
"strip",
"(",
")",
"raise",
"IOError",
"(",
"'Shell command %s failed (exit status %r): %s'",
"%",
"(",
"args",
",",
"ps",
".",
"returncode",
",",
"stderr",
")",
")",
"return",
"stdout",
".",
"strip",
"(",
")"
]
| Returns stdout as a string; raises IOError on failure | [
"Returns",
"stdout",
"as",
"string",
"or",
"None",
"on",
"failure"
]
| db5da0def734246818f4a6e4531be63b7cbaa236 | https://github.com/rsms/tc/blob/db5da0def734246818f4a6e4531be63b7cbaa236/setup.py#L36-L51 | train |
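A usage sketch for shell_cmd as defined above; note that without text mode, Popen pipes yield bytes under Python 3:

    out = shell_cmd('echo hello')  # runs through the shell in the given cwd
    print(out)                     # b'hello' on Python 3, 'hello' on Python 2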
althonos/moclo | moclo/moclo/record.py | CircularRecord.reverse_complement | def reverse_complement(
self,
id=False,
name=False,
description=False,
features=True,
annotations=False,
letter_annotations=True,
dbxrefs=False,
):
"""Return a new ``CircularRecord`` with reverse complement sequence.
"""
return type(self)(
super(CircularRecord, self).reverse_complement(
id=id,
name=name,
description=description,
features=features,
annotations=annotations,
letter_annotations=letter_annotations,
dbxrefs=dbxrefs,
)
) | python | def reverse_complement(
self,
id=False,
name=False,
description=False,
features=True,
annotations=False,
letter_annotations=True,
dbxrefs=False,
):
"""Return a new ``CircularRecord`` with reverse complement sequence.
"""
return type(self)(
super(CircularRecord, self).reverse_complement(
id=id,
name=name,
description=description,
features=features,
annotations=annotations,
letter_annotations=letter_annotations,
dbxrefs=dbxrefs,
)
) | [
"def",
"reverse_complement",
"(",
"self",
",",
"id",
"=",
"False",
",",
"name",
"=",
"False",
",",
"description",
"=",
"False",
",",
"features",
"=",
"True",
",",
"annotations",
"=",
"False",
",",
"letter_annotations",
"=",
"True",
",",
"dbxrefs",
"=",
"False",
",",
")",
":",
"return",
"type",
"(",
"self",
")",
"(",
"super",
"(",
"CircularRecord",
",",
"self",
")",
".",
"reverse_complement",
"(",
"id",
"=",
"id",
",",
"name",
"=",
"name",
",",
"description",
"=",
"description",
",",
"features",
"=",
"features",
",",
"annotations",
"=",
"annotations",
",",
"letter_annotations",
"=",
"letter_annotations",
",",
"dbxrefs",
"=",
"dbxrefs",
",",
")",
")"
]
| Return a new ``CircularRecord`` with reverse complement sequence. | [
"Return",
"a",
"new",
"CircularRecord",
"with",
"reverse",
"complement",
"sequence",
"."
]
| 28a03748df8a2fa43f0c0c8098ca64d11559434e | https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/record.py#L141-L163 | train |
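A small sketch grounded in the code above: because the result is rebuilt with type(self)(...), the returned object stays a CircularRecord, and the constructor evidently accepts a plain SeqRecord:

    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from moclo.record import CircularRecord

    rec = CircularRecord(SeqRecord(Seq("ATGC"), id="plasmid"))
    rc = rec.reverse_complement(id=True)  # keep the original id
    assert isinstance(rc, CircularRecord)
    print(rc.seq)                         # GCAT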
redhat-openstack/python-tripleo-helper | tripleohelper/ssh.py | SshClient.load_private_key | def load_private_key(self, priv_key):
"""Register the SSH private key."""
with open(priv_key) as fd:
self._private_key = paramiko.RSAKey.from_private_key(fd) | python | def load_private_key(self, priv_key):
"""Register the SSH private key."""
with open(priv_key) as fd:
self._private_key = paramiko.RSAKey.from_private_key(fd) | [
"def",
"load_private_key",
"(",
"self",
",",
"priv_key",
")",
":",
"with",
"open",
"(",
"priv_key",
")",
"as",
"fd",
":",
"self",
".",
"_private_key",
"=",
"paramiko",
".",
"RSAKey",
".",
"from_private_key",
"(",
"fd",
")"
]
| Register the SSH private key. | [
"Register",
"the",
"SSH",
"private",
"key",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L68-L71 | train |
redhat-openstack/python-tripleo-helper | tripleohelper/ssh.py | SshClient.start | def start(self):
"""Start the ssh client and connect to the host.
It will wait up to 90 seconds for the ssh service to become available.
If it doesn't succeed in connecting, the function will raise
an SSHException.
"""
if self.via_ip:
connect_to = self.via_ip
self.description = '[%s@%s via %s]' % (self._user,
self._hostname,
self.via_ip)
else:
connect_to = self._hostname
self.description = '[%s@%s]' % (self._user,
self._hostname)
exception = None
for i in range(60):
try:
self._client.connect(
connect_to,
username=self._user,
allow_agent=True,
key_filename=self._key_filename)
# NOTE(Gonéri): TypeError is in the list because of
# https://github.com/paramiko/paramiko/issues/615
self._transport = self._get_transport()
except (OSError,
TypeError,
ssh_exception.SSHException,
ssh_exception.NoValidConnectionsError) as e:
exception = e
LOG.info('%s waiting for %s: %s' %
(self.description, connect_to, str(exception)))
time.sleep(1)
else:
LOG.debug('%s connected' % self.description)
self._started = True
return
_error = ("unable to connect to ssh service on '%s': %s" %
(self._hostname, str(exception)))
LOG.error(_error)
raise exception | python | def start(self):
"""Start the ssh client and connect to the host.
It will wait up to 90 seconds for the ssh service to become available.
If it doesn't succeed in connecting, the function will raise
an SSHException.
"""
if self.via_ip:
connect_to = self.via_ip
self.description = '[%s@%s via %s]' % (self._user,
self._hostname,
self.via_ip)
else:
connect_to = self._hostname
self.description = '[%s@%s]' % (self._user,
self._hostname)
exception = None
for i in range(60):
try:
self._client.connect(
connect_to,
username=self._user,
allow_agent=True,
key_filename=self._key_filename)
# NOTE(Gonéri): TypeError is in the list because of
# https://github.com/paramiko/paramiko/issues/615
self._transport = self._get_transport()
except (OSError,
TypeError,
ssh_exception.SSHException,
ssh_exception.NoValidConnectionsError) as e:
exception = e
LOG.info('%s waiting for %s: %s' %
(self.description, connect_to, str(exception)))
time.sleep(1)
else:
LOG.debug('%s connected' % self.description)
self._started = True
return
_error = ("unable to connect to ssh service on '%s': %s" %
(self._hostname, str(exception)))
LOG.error(_error)
raise exception | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"via_ip",
":",
"connect_to",
"=",
"self",
".",
"via_ip",
"self",
".",
"description",
"=",
"'[%s@%s via %s]'",
"%",
"(",
"self",
".",
"_user",
",",
"self",
".",
"_hostname",
",",
"self",
".",
"via_ip",
")",
"else",
":",
"connect_to",
"=",
"self",
".",
"_hostname",
"self",
".",
"description",
"=",
"'[%s@%s]'",
"%",
"(",
"self",
".",
"_user",
",",
"self",
".",
"_hostname",
")",
"exception",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"60",
")",
":",
"try",
":",
"self",
".",
"_client",
".",
"connect",
"(",
"connect_to",
",",
"username",
"=",
"self",
".",
"_user",
",",
"allow_agent",
"=",
"True",
",",
"key_filename",
"=",
"self",
".",
"_key_filename",
")",
"# NOTE(Gonéri): TypeError is in the list because of",
"# https://github.com/paramiko/paramiko/issues/615",
"self",
".",
"_transport",
"=",
"self",
".",
"_get_transport",
"(",
")",
"except",
"(",
"OSError",
",",
"TypeError",
",",
"ssh_exception",
".",
"SSHException",
",",
"ssh_exception",
".",
"NoValidConnectionsError",
")",
"as",
"e",
":",
"exception",
"=",
"e",
"LOG",
".",
"info",
"(",
"'%s waiting for %s: %s'",
"%",
"(",
"self",
".",
"description",
",",
"connect_to",
",",
"str",
"(",
"exception",
")",
")",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"else",
":",
"LOG",
".",
"debug",
"(",
"'%s connected'",
"%",
"self",
".",
"description",
")",
"self",
".",
"_started",
"=",
"True",
"return",
"_error",
"=",
"(",
"\"unable to connect to ssh service on '%s': %s\"",
"%",
"(",
"self",
".",
"_hostname",
",",
"str",
"(",
"exception",
")",
")",
")",
"LOG",
".",
"error",
"(",
"_error",
")",
"raise",
"exception"
]
| Start the ssh client and connect to the host.
It will wait up to 90 seconds for the ssh service to become available.
If it doesn't succeed in connecting, the function will raise
an SSHException. | [
"Start",
"the",
"ssh",
"client",
"and",
"connect",
"to",
"the",
"host",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L98-L142 | train |
redhat-openstack/python-tripleo-helper | tripleohelper/ssh.py | SshClient._get_channel | def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
return channel | python | def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
return channel | [
"def",
"_get_channel",
"(",
"self",
")",
":",
"channel",
"=",
"self",
".",
"_transport",
".",
"open_session",
"(",
")",
"channel",
".",
"set_combine_stderr",
"(",
"True",
")",
"channel",
".",
"get_pty",
"(",
")",
"return",
"channel"
]
| Returns a channel, taking into account whether a redirection is
needed. | [
"Returns",
"a",
"channel",
"according",
"to",
"if",
"there",
"is",
"a",
"redirection",
"to",
"do",
"or",
"not",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L237-L244 | train |
NJDFan/ctypes-bitfield | bitfield/__init__.py | print_fields | def print_fields(bf, *args, **kwargs):
"""
Print all the fields of a Bitfield object to stdout. This is
primarily a diagnostic aid during debugging.
"""
vals = {k: hex(v) for k, v in bf.items()}
print(bf.base, vals, *args, **kwargs) | python | def print_fields(bf, *args, **kwargs):
"""
Print all the fields of a Bitfield object to stdout. This is
primarily a diagnostic aid during debugging.
"""
vals = {k: hex(v) for k, v in bf.items()}
print(bf.base, vals, *args, **kwargs) | [
"def",
"print_fields",
"(",
"bf",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"vals",
"=",
"{",
"k",
":",
"hex",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"bf",
".",
"items",
"(",
")",
"}",
"print",
"(",
"bf",
".",
"base",
",",
"vals",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Print all the fields of a Bitfield object to stdout. This is
primarily a diagnostic aid during debugging. | [
"Print",
"all",
"the",
"fields",
"of",
"a",
"Bitfield",
"object",
"to",
"stdout",
".",
"This",
"is",
"primarly",
"a",
"diagnostic",
"aid",
"during",
"debugging",
"."
]
| ae76b1dcfef7ecc90bd1900735b94ddee41a6376 | https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/__init__.py#L201-L208 | train |
NJDFan/ctypes-bitfield | bitfield/__init__.py | Bitfield.clone | def clone(self):
"""Return a new bitfield with the same value.
The returned value is a copy, and so is no longer linked to the
original bitfield. This is important when the original is located
at anything other than normal memory, with accesses to it either
slow or having side effects. Creating a clone, and working
against that clone, means that only one read will occur.
"""
temp = self.__class__()
temp.base = self.base
return temp | python | def clone(self):
"""Return a new bitfield with the same value.
The returned value is a copy, and so is no longer linked to the
original bitfield. This is important when the original is located
at anything other than normal memory, with accesses to it either
slow or having side effects. Creating a clone, and working
against that clone, means that only one read will occur.
"""
temp = self.__class__()
temp.base = self.base
return temp | [
"def",
"clone",
"(",
"self",
")",
":",
"temp",
"=",
"self",
".",
"__class__",
"(",
")",
"temp",
".",
"base",
"=",
"self",
".",
"base",
"return",
"temp"
]
| Return a new bitfield with the same value.
The returned value is a copy, and so is no longer linked to the
original bitfield. This is important when the original is located
at anything other than normal memory, with accesses to it either
slow or having side effects. Creating a clone, and working
against that clone, means that only one read will occur. | [
"Return",
"a",
"new",
"bitfield",
"with",
"the",
"same",
"value",
".",
"The",
"returned",
"value",
"is",
"a",
"copy",
"and",
"so",
"is",
"no",
"longer",
"linked",
"to",
"the",
"original",
"bitfield",
".",
"This",
"is",
"important",
"when",
"the",
"original",
"is",
"located",
"at",
"anything",
"other",
"than",
"normal",
"memory",
"with",
"accesses",
"to",
"it",
"either",
"slow",
"or",
"having",
"side",
"effects",
".",
"Creating",
"a",
"clone",
"and",
"working",
"against",
"that",
"clone",
"means",
"that",
"only",
"one",
"read",
"will",
"occur",
"."
]
| ae76b1dcfef7ecc90bd1900735b94ddee41a6376 | https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/__init__.py#L44-L56 | train |
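A behavioural sketch of clone(): same value, independent storage afterwards. Register is a hypothetical Bitfield subclass standing in for whatever this package's factory functions produce (they are not shown in this excerpt):

    reg = Register()
    reg.base = 0x12345678
    snapshot = reg.clone()  # exactly one read of the (possibly slow) source
    reg.base = 0            # later writes do not affect the snapshot
    assert snapshot.base == 0x12345678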
gebn/wood | wood/comparison.py | Comparison.new | def new(self, base: pathlib.PurePath = pathlib.PurePath(),
include_intermediates: bool = True) -> Iterator[str]:
"""
Find the list of new paths in this comparison.
:param base: The base directory to prepend to the right entity's name.
:param include_intermediates: Whether to include new non-empty
directories in the returned iterable. If
you only care about files, or are using
a flat key-based storage system like S3
where directories are a made-up concept,
this can be set to false.
:return: An iterator of the new paths.
"""
if self.is_new:
yield str(base / self.right.name) | python | def new(self, base: pathlib.PurePath = pathlib.PurePath(),
include_intermediates: bool = True) -> Iterator[str]:
"""
Find the list of new paths in this comparison.
:param base: The base directory to prepend to the right entity's name.
:param include_intermediates: Whether to include new non-empty
directories in the returned iterable. If
you only care about files, or are using
a flat key-based storage system like S3
where directories are a made-up concept,
this can be set to false.
:return: An iterator of the new paths.
"""
if self.is_new:
yield str(base / self.right.name) | [
"def",
"new",
"(",
"self",
",",
"base",
":",
"pathlib",
".",
"PurePath",
"=",
"pathlib",
".",
"PurePath",
"(",
")",
",",
"include_intermediates",
":",
"bool",
"=",
"True",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"if",
"self",
".",
"is_new",
":",
"yield",
"str",
"(",
"base",
"/",
"self",
".",
"right",
".",
"name",
")"
]
| Find the list of new paths in this comparison.
:param base: The base directory to prepend to the right entity's name.
:param include_intermediates: Whether to include new non-empty
directories in the returned iterable. If
you only care about files, or are using
a flat key-based storage system like S3
where directories are a made-up concept,
this can be set to false.
:return: An iterator of the new paths. | [
"Find",
"the",
"list",
"of",
"new",
"paths",
"in",
"this",
"comparison",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L61-L76 | train |
gebn/wood | wood/comparison.py | Comparison.modified | def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \
-> Iterator[str]:
"""
Find the paths of modified files. There is no option to include
intermediate directories, as all files and directories exist in both
the left and right trees.
:param base: The base directory to recursively append to the right
entity.
:return: An iterable of paths of modified files.
"""
# N.B. this method will only ever return files, as directories cannot
# be "modified"
if self.is_modified:
yield str(base / self.right.name) | python | def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \
-> Iterator[str]:
"""
Find the paths of modified files. There is no option to include
intermediate directories, as all files and directories exist in both
the left and right trees.
:param base: The base directory to recursively append to the right
entity.
:return: An iterable of paths of modified files.
"""
# N.B. this method will only ever return files, as directories cannot
# be "modified"
if self.is_modified:
yield str(base / self.right.name) | [
"def",
"modified",
"(",
"self",
",",
"base",
":",
"pathlib",
".",
"PurePath",
"=",
"pathlib",
".",
"PurePath",
"(",
")",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"# N.B. this method will only ever return files, as directories cannot",
"# be \"modified\"",
"if",
"self",
".",
"is_modified",
":",
"yield",
"str",
"(",
"base",
"/",
"self",
".",
"right",
".",
"name",
")"
]
| Find the paths of modified files. There is no option to include
intermediate directories, as all files and directories exist in both
the left and right trees.
:param base: The base directory to recursively append to the right
entity.
:return: An iterable of paths of modified files. | [
"Find",
"the",
"paths",
"of",
"modified",
"files",
".",
"There",
"is",
"no",
"option",
"to",
"include",
"intermediate",
"directories",
"as",
"all",
"files",
"and",
"directories",
"exist",
"in",
"both",
"the",
"left",
"and",
"right",
"trees",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L91-L105 | train |
gebn/wood | wood/comparison.py | Comparison.deleted | def deleted(self, base: pathlib.PurePath = pathlib.PurePath(),
include_children: bool = True,
include_directories: bool = True) -> Iterator[str]:
"""
Find the paths of entities deleted between the left and right entities
in this comparison.
:param base: The base directory to recursively append to entities.
:param include_children: Whether to recursively include children of
deleted directories. These are themselves
deleted by definition, however it may be
useful to the caller to list them explicitly.
:param include_directories: Whether to include directories in the
returned iterable.
:return: An iterable of deleted paths.
"""
if self.is_deleted:
yield str(base / self.left.name) | python | def deleted(self, base: pathlib.PurePath = pathlib.PurePath(),
include_children: bool = True,
include_directories: bool = True) -> Iterator[str]:
"""
Find the paths of entities deleted between the left and right entities
in this comparison.
:param base: The base directory to recursively append to entities.
:param include_children: Whether to recursively include children of
deleted directories. These are themselves
deleted by definition, however it may be
useful to the caller to list them explicitly.
:param include_directories: Whether to include directories in the
returned iterable.
:return: An iterable of deleted paths.
"""
if self.is_deleted:
yield str(base / self.left.name) | [
"def",
"deleted",
"(",
"self",
",",
"base",
":",
"pathlib",
".",
"PurePath",
"=",
"pathlib",
".",
"PurePath",
"(",
")",
",",
"include_children",
":",
"bool",
"=",
"True",
",",
"include_directories",
":",
"bool",
"=",
"True",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"if",
"self",
".",
"is_deleted",
":",
"yield",
"str",
"(",
"base",
"/",
"self",
".",
"left",
".",
"name",
")"
]
| Find the paths of entities deleted between the left and right entities
in this comparison.
:param base: The base directory to recursively append to entities.
:param include_children: Whether to recursively include children of
deleted directories. These are themselves
deleted by definition, however it may be
useful to the caller to list them explicitly.
:param include_directories: Whether to include directories in the
returned iterable.
:return: An iterable of deleted paths. | [
"Find",
"the",
"paths",
"of",
"entities",
"deleted",
"between",
"the",
"left",
"and",
"right",
"entities",
"in",
"this",
"comparison",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L116-L133 | train |
gebn/wood | wood/comparison.py | Comparison.compare | def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]':
"""
Calculate the comparison of two entities.
| left | right | Return Type |
|===========|===========|=========================|
| file | file | FileComparison |
| file | directory | FileDirectoryComparison |
| file | None | FileComparison |
| directory | file | DirectoryFileComparison |
| directory | directory | DirectoryComparison |
| directory | None | DirectoryComparison |
| None | file | FileComparison |
| None | directory | DirectoryComparison |
| None | None | TypeError |
:param left: The left side or "before" entity.
:param right: The right side or "after" entity.
:return: See table above.
"""
if isinstance(left, File) and isinstance(right, Directory):
return FileDirectoryComparison(left, right)
if isinstance(left, Directory) and isinstance(right, File):
return DirectoryFileComparison(left, right)
if isinstance(left, File) or isinstance(right, File):
return FileComparison(left, right)
if isinstance(left, Directory) or isinstance(right, Directory):
return DirectoryComparison(left, right)
raise TypeError(f'Cannot compare entities: {left}, {right}') | python | def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]':
"""
Calculate the comparison of two entities.
| left | right | Return Type |
|===========|===========|=========================|
| file | file | FileComparison |
| file | directory | FileDirectoryComparison |
| file | None | FileComparison |
| directory | file | DirectoryFileComparison |
| directory | directory | DirectoryComparison |
| directory | None | DirectoryComparison |
| None | file | FileComparison |
| None | directory | DirectoryComparison |
| None | None | TypeError |
:param left: The left side or "before" entity.
:param right: The right side or "after" entity.
:return: See table above.
"""
if isinstance(left, File) and isinstance(right, Directory):
return FileDirectoryComparison(left, right)
if isinstance(left, Directory) and isinstance(right, File):
return DirectoryFileComparison(left, right)
if isinstance(left, File) or isinstance(right, File):
return FileComparison(left, right)
if isinstance(left, Directory) or isinstance(right, Directory):
return DirectoryComparison(left, right)
raise TypeError(f'Cannot compare entities: {left}, {right}') | [
"def",
"compare",
"(",
"left",
":",
"Optional",
"[",
"L",
"]",
",",
"right",
":",
"Optional",
"[",
"R",
"]",
")",
"->",
"'Comparison[L, R]'",
":",
"if",
"isinstance",
"(",
"left",
",",
"File",
")",
"and",
"isinstance",
"(",
"right",
",",
"Directory",
")",
":",
"return",
"FileDirectoryComparison",
"(",
"left",
",",
"right",
")",
"if",
"isinstance",
"(",
"left",
",",
"Directory",
")",
"and",
"isinstance",
"(",
"right",
",",
"File",
")",
":",
"return",
"DirectoryFileComparison",
"(",
"left",
",",
"right",
")",
"if",
"isinstance",
"(",
"left",
",",
"File",
")",
"or",
"isinstance",
"(",
"right",
",",
"File",
")",
":",
"return",
"FileComparison",
"(",
"left",
",",
"right",
")",
"if",
"isinstance",
"(",
"left",
",",
"Directory",
")",
"or",
"isinstance",
"(",
"right",
",",
"Directory",
")",
":",
"return",
"DirectoryComparison",
"(",
"left",
",",
"right",
")",
"raise",
"TypeError",
"(",
"f'Cannot compare entities: {left}, {right}'",
")"
]
| Calculate the comparison of two entities.
| left | right | Return Type |
|===========|===========|=========================|
| file | file | FileComparison |
| file | directory | FileDirectoryComparison |
| file | None | FileComparison |
| directory | file | DirectoryFileComparison |
| directory | directory | DirectoryComparison |
| directory | None | DirectoryComparison |
| None | file | FileComparison |
| None | directory | DirectoryComparison |
| None | None | TypeError |
:param left: The left side or "before" entity.
:param right: The right side or "after" entity.
:return: See table above. | [
"Calculate",
"the",
"comparison",
"of",
"two",
"entities",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L157-L189 | train |
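The one row of the dispatch table that needs no entity constructors is (None, None), so it can be exercised directly:

    try:
        Comparison.compare(None, None)
    except TypeError as exc:
        print(exc)  # Cannot compare entities: None, None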
gebn/wood | wood/comparison.py | Comparison.print_hierarchy | def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \
-> None:
"""
Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout.
"""
print(' ' * self._INDENT_SIZE * level + str(self), file=file) | python | def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \
-> None:
"""
Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout.
"""
print(' ' * self._INDENT_SIZE * level + str(self), file=file) | [
"def",
"print_hierarchy",
"(",
"self",
",",
"level",
":",
"int",
"=",
"0",
",",
"file",
":",
"IO",
"[",
"str",
"]",
"=",
"sys",
".",
"stdout",
")",
"->",
"None",
":",
"print",
"(",
"' '",
"*",
"self",
".",
"_INDENT_SIZE",
"*",
"level",
"+",
"str",
"(",
"self",
")",
",",
"file",
"=",
"file",
")"
]
| Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout. | [
"Print",
"this",
"comparison",
"and",
"its",
"children",
"with",
"indentation",
"to",
"represent",
"nesting",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L191-L202 | train |
gebn/wood | wood/comparison.py | FileComparison.is_modified | def is_modified(self) -> bool:
"""
Find whether the files on the left and right are different. Note,
modified implies the contents of the file have changed, which is
predicated on the file existing on both the left and right. Therefore
this will be false if the file on the left has been deleted, or the
file on the right is new.
:return: Whether the file has been modified.
"""
if self.is_new or self.is_deleted:
return False
return self.left.md5 != self.right.md5 | python | def is_modified(self) -> bool:
"""
Find whether the files on the left and right are different. Note,
modified implies the contents of the file have changed, which is
predicated on the file existing on both the left and right. Therefore
this will be false if the file on the left has been deleted, or the
file on the right is new.
:return: Whether the file has been modified.
"""
if self.is_new or self.is_deleted:
return False
return self.left.md5 != self.right.md5 | [
"def",
"is_modified",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"self",
".",
"is_new",
"or",
"self",
".",
"is_deleted",
":",
"return",
"False",
"return",
"self",
".",
"left",
".",
"md5",
"!=",
"self",
".",
"right",
".",
"md5"
]
| Find whether the files on the left and right are different. Note,
modified implies the contents of the file have changed, which is
predicated on the file existing on both the left and right. Therefore
this will be false if the file on the left has been deleted, or the
file on the right is new.
:return: Whether the file has been modified. | [
"Find",
"whether",
"the",
"files",
"on",
"the",
"left",
"and",
"right",
"are",
"different",
".",
"Note",
"modified",
"implies",
"the",
"contents",
"of",
"the",
"file",
"have",
"changed",
"which",
"is",
"predicated",
"on",
"the",
"file",
"existing",
"on",
"both",
"the",
"left",
"and",
"right",
".",
"Therefore",
"this",
"will",
"be",
"false",
"if",
"the",
"file",
"on",
"the",
"left",
"has",
"been",
"deleted",
"or",
"the",
"file",
"on",
"the",
"right",
"is",
"new",
"."
]
| efc71879890dbd2f2d7a0b1a65ed22a0843139dd | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L224-L236 | train |
pgxcentre/geneparse | geneparse/index/impute2.py | generate_index | def generate_index(fn, cols=None, names=None, sep=" "):
"""Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing the columns to keep (as int).
names (list): the names corresponding to the columns to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
"""
# Some assertions
assert cols is not None, "'cols' was not set"
assert names is not None, "'names' was not set"
assert len(cols) == len(names)
# Getting the open function
bgzip, open_func = get_open_func(fn, return_fmt=True)
# Reading the required columns
data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
compression="gzip" if bgzip else None)
# Getting the seek information
f = open_func(fn, "rb")
data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
f.close()
# Saving the index to file
write_index(get_index_fn(fn), data)
return data | python | def generate_index(fn, cols=None, names=None, sep=" "):
"""Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing the columns to keep (as int).
names (list): the names corresponding to the columns to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
"""
# Some assertions
assert cols is not None, "'cols' was not set"
assert names is not None, "'names' was not set"
assert len(cols) == len(names)
# Getting the open function
bgzip, open_func = get_open_func(fn, return_fmt=True)
# Reading the required columns
data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
compression="gzip" if bgzip else None)
# Getting the seek information
f = open_func(fn, "rb")
data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
f.close()
# Saving the index to file
write_index(get_index_fn(fn), data)
return data | [
"def",
"generate_index",
"(",
"fn",
",",
"cols",
"=",
"None",
",",
"names",
"=",
"None",
",",
"sep",
"=",
"\" \"",
")",
":",
"# Some assertions",
"assert",
"cols",
"is",
"not",
"None",
",",
"\"'cols' was not set\"",
"assert",
"names",
"is",
"not",
"None",
",",
"\"'names' was not set\"",
"assert",
"len",
"(",
"cols",
")",
"==",
"len",
"(",
"names",
")",
"# Getting the open function",
"bgzip",
",",
"open_func",
"=",
"get_open_func",
"(",
"fn",
",",
"return_fmt",
"=",
"True",
")",
"# Reading the required columns",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"fn",
",",
"sep",
"=",
"sep",
",",
"engine",
"=",
"\"c\"",
",",
"usecols",
"=",
"cols",
",",
"names",
"=",
"names",
",",
"compression",
"=",
"\"gzip\"",
"if",
"bgzip",
"else",
"None",
")",
"# Getting the seek information",
"f",
"=",
"open_func",
"(",
"fn",
",",
"\"rb\"",
")",
"data",
"[",
"\"seek\"",
"]",
"=",
"np",
".",
"fromiter",
"(",
"_seek_generator",
"(",
"f",
")",
",",
"dtype",
"=",
"np",
".",
"uint",
")",
"[",
":",
"-",
"1",
"]",
"f",
".",
"close",
"(",
")",
"# Saving the index to file",
"write_index",
"(",
"get_index_fn",
"(",
"fn",
")",
",",
"data",
")",
"return",
"data"
]
| Build an index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index. | [
"Build",
"a",
"index",
"for",
"the",
"given",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L59-L92 | train |
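A usage sketch for generate_index follows; the file name and column layout are illustrative assumptions, not taken from the record, though the import path matches the record's module:

from geneparse.index.impute2 import generate_index

# Index a hypothetical IMPUTE2 file on its first three columns.
idx = generate_index(
    "chr22.impute2",                     # assumed file name
    cols=[0, 1, 2],
    names=["chrom", "name", "pos"],
    sep=" ",
)
print(idx.columns)                       # chrom, name, pos, plus the added 'seek' byte offsets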
pgxcentre/geneparse | geneparse/index/impute2.py | get_open_func | def get_open_func(fn, return_fmt=False):
"""Get the opening function.
Args:
fn (str): the name of the file.
return_fmt (bool): if the file format needs to be returned.
Returns:
tuple or function: when return_fmt is True, a tuple containing two
elements: a boolean telling if the format is bgzip, and the opening
function; otherwise, only the opening function.
"""
# The file might be compressed using bgzip
bgzip = None
with open(fn, "rb") as i_file:
bgzip = i_file.read(3) == b"\x1f\x8b\x08"
if bgzip and not HAS_BIOPYTHON:
raise ValueError("needs BioPython to index a bgzip file")
open_func = open
if bgzip:
open_func = BgzfReader
# Trying to read
try:
with open_func(fn, "r") as i_file:
if bgzip:
if not i_file.seekable():
raise ValueError
pass
except ValueError:
raise ValueError("{}: use bgzip for compression...".format(fn))
if return_fmt:
return bgzip, open_func
return open_func | python | def get_open_func(fn, return_fmt=False):
"""Get the opening function.
Args:
fn (str): the name of the file.
return_fmt (bool): if the file format needs to be returned.
Returns:
tuple or function: when return_fmt is True, a tuple containing two
elements: a boolean telling if the format is bgzip, and the opening
function; otherwise, only the opening function.
"""
# The file might be compressed using bgzip
bgzip = None
with open(fn, "rb") as i_file:
bgzip = i_file.read(3) == b"\x1f\x8b\x08"
if bgzip and not HAS_BIOPYTHON:
raise ValueError("needs BioPython to index a bgzip file")
open_func = open
if bgzip:
open_func = BgzfReader
# Trying to read
try:
with open_func(fn, "r") as i_file:
if bgzip:
if not i_file.seekable():
raise ValueError
pass
except ValueError:
raise ValueError("{}: use bgzip for compression...".format(fn))
if return_fmt:
return bgzip, open_func
return open_func | [
"def",
"get_open_func",
"(",
"fn",
",",
"return_fmt",
"=",
"False",
")",
":",
"# The file might be compressed using bgzip",
"bgzip",
"=",
"None",
"with",
"open",
"(",
"fn",
",",
"\"rb\"",
")",
"as",
"i_file",
":",
"bgzip",
"=",
"i_file",
".",
"read",
"(",
"3",
")",
"==",
"b\"\\x1f\\x8b\\x08\"",
"if",
"bgzip",
"and",
"not",
"HAS_BIOPYTHON",
":",
"raise",
"ValueError",
"(",
"\"needs BioPython to index a bgzip file\"",
")",
"open_func",
"=",
"open",
"if",
"bgzip",
":",
"open_func",
"=",
"BgzfReader",
"# Trying to read",
"try",
":",
"with",
"open_func",
"(",
"fn",
",",
"\"r\"",
")",
"as",
"i_file",
":",
"if",
"bgzip",
":",
"if",
"not",
"i_file",
".",
"seekable",
"(",
")",
":",
"raise",
"ValueError",
"pass",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"{}: use bgzip for compression...\"",
".",
"format",
"(",
"fn",
")",
")",
"if",
"return_fmt",
":",
"return",
"bgzip",
",",
"open_func",
"return",
"open_func"
]
| Get the opening function.
Args:
fn (str): the name of the file.
return_fmt (bool): if the file format needs to be returned.
Returns:
tuple or function: when return_fmt is True, a tuple containing two
elements: a boolean telling if the format is bgzip, and the opening
function; otherwise, only the opening function. | [
"Get",
"the",
"opening",
"function",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L95-L133 | train |
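A minimal sketch of consuming both return shapes of get_open_func (the file names are made up; bgzipped input additionally requires BioPython to be installed):

from geneparse.index.impute2 import get_open_func

bgzip, open_func = get_open_func("genotypes.impute2.gz", return_fmt=True)
with open_func("genotypes.impute2.gz", "rb") as f:
    first_line = f.readline()

open_func = get_open_func("genotypes.impute2")   # return_fmt omitted: only the callable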
pgxcentre/geneparse | geneparse/index/impute2.py | get_index | def get_index(fn, cols, names, sep):
"""Restores the index for a given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
If the index doesn't exist for the file, it is first created.
"""
if not has_index(fn):
# The index doesn't exist, generate it
return generate_index(fn, cols, names, sep)
# Retrieving the index
file_index = read_index(get_index_fn(fn))
# Checking the names are there
if len(set(names) - (set(file_index.columns) - {'seek'})) != 0:
raise ValueError("{}: missing index columns: reindex".format(fn))
if "seek" not in file_index.columns:
raise ValueError("{}: invalid index: reindex".format(fn))
return file_index | python | def get_index(fn, cols, names, sep):
"""Restores the index for a given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
If the index doesn't exist for the file, it is first created.
"""
if not has_index(fn):
# The index doesn't exist, generate it
return generate_index(fn, cols, names, sep)
# Retrieving the index
file_index = read_index(get_index_fn(fn))
# Checking the names are there
if len(set(names) - (set(file_index.columns) - {'seek'})) != 0:
raise ValueError("{}: missing index columns: reindex".format(fn))
if "seek" not in file_index.columns:
raise ValueError("{}: invalid index: reindex".format(fn))
return file_index | [
"def",
"get_index",
"(",
"fn",
",",
"cols",
",",
"names",
",",
"sep",
")",
":",
"if",
"not",
"has_index",
"(",
"fn",
")",
":",
"# The index doesn't exists, generate it",
"return",
"generate_index",
"(",
"fn",
",",
"cols",
",",
"names",
",",
"sep",
")",
"# Retrieving the index",
"file_index",
"=",
"read_index",
"(",
"get_index_fn",
"(",
"fn",
")",
")",
"# Checking the names are there",
"if",
"len",
"(",
"set",
"(",
"names",
")",
"-",
"(",
"set",
"(",
"file_index",
".",
"columns",
")",
"-",
"{",
"'seek'",
"}",
")",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"{}: missing index columns: reindex\"",
".",
"format",
"(",
"fn",
")",
")",
"if",
"\"seek\"",
"not",
"in",
"file_index",
".",
"columns",
":",
"raise",
"ValueError",
"(",
"\"{}: invalid index: reindex\"",
".",
"format",
"(",
"fn",
")",
")",
"return",
"file_index"
]
| Restores the index for a given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
If the index doesn't exist for the file, it is first created. | [
"Restores",
"the",
"index",
"for",
"a",
"given",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L136-L165 | train |
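Sketch of the caching behaviour: the first call to get_index builds the on-disk index, later calls restore it. The file name and marker are hypothetical:

from geneparse.index.impute2 import get_index

index = get_index("chr22.impute2", cols=[0, 1, 2],
                  names=["chrom", "name", "pos"], sep=" ")
offset = index.loc[index["name"] == "rs12345", "seek"]   # byte offset of an assumed marker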
pgxcentre/geneparse | geneparse/index/impute2.py | write_index | def write_index(fn, index):
"""Writes the index to file.
Args:
fn (str): the name of the file that will contain the index.
index (pandas.DataFrame): the index.
"""
with open(fn, "wb") as o_file:
o_file.write(_CHECK_STRING)
o_file.write(zlib.compress(bytes(
index.to_csv(None, index=False, encoding="utf-8"),
encoding="utf-8",
))) | python | def write_index(fn, index):
"""Writes the index to file.
Args:
fn (str): the name of the file that will contain the index.
index (pandas.DataFrame): the index.
"""
with open(fn, "wb") as o_file:
o_file.write(_CHECK_STRING)
o_file.write(zlib.compress(bytes(
index.to_csv(None, index=False, encoding="utf-8"),
encoding="utf-8",
))) | [
"def",
"write_index",
"(",
"fn",
",",
"index",
")",
":",
"with",
"open",
"(",
"fn",
",",
"\"wb\"",
")",
"as",
"o_file",
":",
"o_file",
".",
"write",
"(",
"_CHECK_STRING",
")",
"o_file",
".",
"write",
"(",
"zlib",
".",
"compress",
"(",
"bytes",
"(",
"index",
".",
"to_csv",
"(",
"None",
",",
"index",
"=",
"False",
",",
"encoding",
"=",
"\"utf-8\"",
")",
",",
"encoding",
"=",
"\"utf-8\"",
",",
")",
")",
")"
]
| Writes the index to file.
Args:
fn (str): the name of the file that will contain the index.
index (pandas.DataFrame): the index. | [
"Writes",
"the",
"index",
"to",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L168-L181 | train |
pgxcentre/geneparse | geneparse/index/impute2.py | read_index | def read_index(fn):
"""Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file.
"""
index = None
with open(fn, "rb") as i_file:
if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
raise ValueError("{}: not a valid index file".format(fn))
index = pd.read_csv(io.StringIO(
zlib.decompress(i_file.read()).decode(encoding="utf-8"),
))
return index | python | def read_index(fn):
"""Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file.
"""
index = None
with open(fn, "rb") as i_file:
if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
raise ValueError("{}: not a valid index file".format(fn))
index = pd.read_csv(io.StringIO(
zlib.decompress(i_file.read()).decode(encoding="utf-8"),
))
return index | [
"def",
"read_index",
"(",
"fn",
")",
":",
"index",
"=",
"None",
"with",
"open",
"(",
"fn",
",",
"\"rb\"",
")",
"as",
"i_file",
":",
"if",
"i_file",
".",
"read",
"(",
"len",
"(",
"_CHECK_STRING",
")",
")",
"!=",
"_CHECK_STRING",
":",
"raise",
"ValueError",
"(",
"\"{}: not a valid index file\"",
".",
"format",
"(",
"fn",
")",
")",
"index",
"=",
"pd",
".",
"read_csv",
"(",
"io",
".",
"StringIO",
"(",
"zlib",
".",
"decompress",
"(",
"i_file",
".",
"read",
"(",
")",
")",
".",
"decode",
"(",
"encoding",
"=",
"\"utf-8\"",
")",
",",
")",
")",
"return",
"index"
]
| Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file. | [
"Reads",
"index",
"from",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L184-L206 | train |
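Round-trip sketch for the write_index/read_index serialization pair (zlib-compressed CSV behind a magic byte string; the DataFrame content and file name are made up, and the magic-string check makes read_index raise ValueError on foreign files):

import pandas as pd
from geneparse.index.impute2 import write_index, read_index

df = pd.DataFrame({"chrom": [1], "name": ["rs1"], "pos": [1000], "seek": [0]})
write_index("example.impute2.idx", df)
restored = read_index("example.impute2.idx")
assert list(restored.columns) == ["chrom", "name", "pos", "seek"]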
rhayes777/PyAutoFit | autofit/tools/pipeline.py | make_path | def make_path(phase) -> str:
"""
Create the path to the folder at which the metadata and optimizer pickle should be saved
"""
return "{}/{}{}{}".format(conf.instance.output_path, phase.phase_path, phase.phase_name, phase.phase_tag) | python | def make_path(phase) -> str:
"""
Create the path to the folder at which the metadata and optimizer pickle should be saved
"""
return "{}/{}{}{}".format(conf.instance.output_path, phase.phase_path, phase.phase_name, phase.phase_tag) | [
"def",
"make_path",
"(",
"phase",
")",
"->",
"str",
":",
"return",
"\"{}/{}{}{}\"",
".",
"format",
"(",
"conf",
".",
"instance",
".",
"output_path",
",",
"phase",
".",
"phase_path",
",",
"phase",
".",
"phase_name",
",",
"phase",
".",
"phase_tag",
")"
]
| Create the path to the folder at which the metadata and optimizer pickle should be saved | [
"Create",
"the",
"path",
"to",
"the",
"folder",
"at",
"which",
"the",
"metadata",
"and",
"optimizer",
"pickle",
"should",
"be",
"saved"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L177-L181 | train |
rhayes777/PyAutoFit | autofit/tools/pipeline.py | save_optimizer_for_phase | def save_optimizer_for_phase(phase):
"""
Save the optimizer associated with the phase as a pickle
"""
with open(make_optimizer_pickle_path(phase), "w+b") as f:
f.write(pickle.dumps(phase.optimizer)) | python | def save_optimizer_for_phase(phase):
"""
Save the optimizer associated with the phase as a pickle
"""
with open(make_optimizer_pickle_path(phase), "w+b") as f:
f.write(pickle.dumps(phase.optimizer)) | [
"def",
"save_optimizer_for_phase",
"(",
"phase",
")",
":",
"with",
"open",
"(",
"make_optimizer_pickle_path",
"(",
"phase",
")",
",",
"\"w+b\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"pickle",
".",
"dumps",
"(",
"phase",
".",
"optimizer",
")",
")"
]
| Save the optimizer associated with the phase as a pickle | [
"Save",
"the",
"optimizer",
"associated",
"with",
"the",
"phase",
"as",
"a",
"pickle"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L184-L189 | train |
rhayes777/PyAutoFit | autofit/tools/pipeline.py | assert_optimizer_pickle_matches_for_phase | def assert_optimizer_pickle_matches_for_phase(phase):
"""
Assert that the previously saved optimizer is equal to the phase's optimizer if a saved optimizer is found.
Parameters
----------
phase
The phase
Raises
-------
exc.PipelineException
"""
path = make_optimizer_pickle_path(phase)
if os.path.exists(path):
with open(path, "r+b") as f:
loaded_optimizer = pickle.loads(f.read())
if phase.optimizer != loaded_optimizer:
raise exc.PipelineException(
f"Can't restart phase at path {path} because settings don't match. "
f"Did you change the optimizer settings or model?") | python | def assert_optimizer_pickle_matches_for_phase(phase):
"""
Assert that the previously saved optimizer is equal to the phase's optimizer if a saved optimizer is found.
Parameters
----------
phase
The phase
Raises
-------
exc.PipelineException
"""
path = make_optimizer_pickle_path(phase)
if os.path.exists(path):
with open(path, "r+b") as f:
loaded_optimizer = pickle.loads(f.read())
if phase.optimizer != loaded_optimizer:
raise exc.PipelineException(
f"Can't restart phase at path {path} because settings don't match. "
f"Did you change the optimizer settings or model?") | [
"def",
"assert_optimizer_pickle_matches_for_phase",
"(",
"phase",
")",
":",
"path",
"=",
"make_optimizer_pickle_path",
"(",
"phase",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"r+b\"",
")",
"as",
"f",
":",
"loaded_optimizer",
"=",
"pickle",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"if",
"phase",
".",
"optimizer",
"!=",
"loaded_optimizer",
":",
"raise",
"exc",
".",
"PipelineException",
"(",
"f\"Can't restart phase at path {path} because settings don't match. \"",
"f\"Did you change the optimizer settings or model?\"",
")"
]
| Assert that the previously saved optimizer is equal to the phase's optimizer if a saved optimizer is found.
Parameters
----------
phase
The phase
Raises
-------
exc.PipelineException | [
"Assert",
"that",
"the",
"previously",
"saved",
"optimizer",
"is",
"equal",
"to",
"the",
"phase",
"s",
"optimizer",
"if",
"a",
"saved",
"optimizer",
"is",
"found",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L192-L212 | train |
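Sketch of the save/assert cycle across runs; phase is a stand-in for a PyAutoFit phase object carrying optimizer, phase_path, phase_name and phase_tag attributes:

from autofit.tools.pipeline import (
    save_optimizer_for_phase, assert_optimizer_pickle_matches_for_phase)

save_optimizer_for_phase(phase)                    # first run: pickle the optimizer
assert_optimizer_pickle_matches_for_phase(phase)   # resumed run: passes only if settings match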
rhayes777/PyAutoFit | autofit/tools/pipeline.py | ResultsCollection.add | def add(self, phase_name, result):
"""
Add the result of a phase.
Parameters
----------
phase_name: str
The name of the phase
result
The result of that phase
"""
if phase_name in self.__result_dict:
raise exc.PipelineException(
"Results from a phase called {} already exist in the pipeline".format(phase_name))
self.__result_list.append(result)
self.__result_dict[phase_name] = result | python | def add(self, phase_name, result):
"""
Add the result of a phase.
Parameters
----------
phase_name: str
The name of the phase
result
The result of that phase
"""
if phase_name in self.__result_dict:
raise exc.PipelineException(
"Results from a phase called {} already exist in the pipeline".format(phase_name))
self.__result_list.append(result)
self.__result_dict[phase_name] = result | [
"def",
"add",
"(",
"self",
",",
"phase_name",
",",
"result",
")",
":",
"if",
"phase_name",
"in",
"self",
".",
"__result_dict",
":",
"raise",
"exc",
".",
"PipelineException",
"(",
"\"Results from a phase called {} already exist in the pipeline\"",
".",
"format",
"(",
"phase_name",
")",
")",
"self",
".",
"__result_list",
".",
"append",
"(",
"result",
")",
"self",
".",
"__result_dict",
"[",
"phase_name",
"]",
"=",
"result"
]
| Add the result of a phase.
Parameters
----------
phase_name: str
The name of the phase
result
The result of that phase | [
"Add",
"the",
"result",
"of",
"a",
"phase",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L38-L53 | train |
rhayes777/PyAutoFit | autofit/tools/pipeline.py | ResultsCollection.from_phase | def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name]
except KeyError:
raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join(
self.__result_dict.keys()))) | python | def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name]
except KeyError:
raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join(
self.__result_dict.keys()))) | [
"def",
"from_phase",
"(",
"self",
",",
"phase_name",
")",
":",
"try",
":",
"return",
"self",
".",
"__result_dict",
"[",
"phase_name",
"]",
"except",
"KeyError",
":",
"raise",
"exc",
".",
"PipelineException",
"(",
"\"No previous phase named {} found in results ({})\"",
".",
"format",
"(",
"phase_name",
",",
"\", \"",
".",
"join",
"(",
"self",
".",
"__result_dict",
".",
"keys",
"(",
")",
")",
")",
")"
]
| Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found | [
"Returns",
"the",
"result",
"of",
"a",
"previous",
"phase",
"by",
"its",
"name"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L74-L97 | train |
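Sketch of ResultsCollection semantics, directly from the two methods above (result_1 is a placeholder result object):

from autofit.tools.pipeline import ResultsCollection

results = ResultsCollection()
results.add("phase_1", result_1)
assert results.from_phase("phase_1") is result_1
results.add("phase_1", result_1)     # raises exc.PipelineException: duplicate phase name
results.from_phase("phase_2")        # raises exc.PipelineException: unknown phase name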
rhayes777/PyAutoFit | autofit/tools/pipeline.py | Pipeline.save_metadata | def save_metadata(self, phase, data_name):
"""
Save metadata associated with the phase, such as the name of the pipeline, the name of the phase and the name
of the data being fit
"""
with open("{}/.metadata".format(make_path(phase)), "w+") as f:
f.write("pipeline={}\nphase={}\ndata={}".format(self.pipeline_name, phase.phase_name,
data_name)) | python | def save_metadata(self, phase, data_name):
"""
Save metadata associated with the phase, such as the name of the pipeline, the name of the phase and the name
of the data being fit
"""
with open("{}/.metadata".format(make_path(phase)), "w+") as f:
f.write("pipeline={}\nphase={}\ndata={}".format(self.pipeline_name, phase.phase_name,
data_name)) | [
"def",
"save_metadata",
"(",
"self",
",",
"phase",
",",
"data_name",
")",
":",
"with",
"open",
"(",
"\"{}/.metadata\"",
".",
"format",
"(",
"make_path",
"(",
"phase",
")",
")",
",",
"\"w+\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"pipeline={}\\nphase={}\\ndata={}\"",
".",
"format",
"(",
"self",
".",
"pipeline_name",
",",
"phase",
".",
"phase_name",
",",
"data_name",
")",
")"
]
| Save metadata associated with the phase, such as the name of the pipeline, the name of the phase and the name
of the data being fit | [
"Save",
"metadata",
"associated",
"with",
"the",
"phase",
"such",
"as",
"the",
"name",
"of",
"the",
"pipeline",
"the",
"name",
"of",
"the",
"phase",
"and",
"the",
"name",
"of",
"the",
"data",
"being",
"fit"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L134-L141 | train |
rhayes777/PyAutoFit | autofit/tools/pipeline.py | Pipeline.run_function | def run_function(self, func, data_name=None, assert_optimizer_pickle_matches=True):
"""
Run the function for each phase in the pipeline.
Parameters
----------
assert_optimizer_pickle_matches
Whether to check that a previously pickled optimizer matches this phase's optimizer
data_name
The name of the data being fit, recorded in the phase metadata
func
A function that takes a phase and prior results, returning results for that phase
Returns
-------
results: ResultsCollection
A collection of results
"""
results = ResultsCollection()
for i, phase in enumerate(self.phases):
logger.info("Running Phase {} (Number {})".format(phase.optimizer.phase_name, i))
if assert_optimizer_pickle_matches:
assert_optimizer_pickle_matches_for_phase(phase)
save_optimizer_for_phase(phase)
self.save_metadata(phase, data_name)
results.add(phase.phase_name, func(phase, results))
return results | python | def run_function(self, func, data_name=None, assert_optimizer_pickle_matches=True):
"""
Run the function for each phase in the pipeline.
Parameters
----------
assert_optimizer_pickle_matches
Whether to check that a previously pickled optimizer matches this phase's optimizer
data_name
The name of the data being fit, recorded in the phase metadata
func
A function that takes a phase and prior results, returning results for that phase
Returns
-------
results: ResultsCollection
A collection of results
"""
results = ResultsCollection()
for i, phase in enumerate(self.phases):
logger.info("Running Phase {} (Number {})".format(phase.optimizer.phase_name, i))
if assert_optimizer_pickle_matches:
assert_optimizer_pickle_matches_for_phase(phase)
save_optimizer_for_phase(phase)
self.save_metadata(phase, data_name)
results.add(phase.phase_name, func(phase, results))
return results | [
"def",
"run_function",
"(",
"self",
",",
"func",
",",
"data_name",
"=",
"None",
",",
"assert_optimizer_pickle_matches",
"=",
"True",
")",
":",
"results",
"=",
"ResultsCollection",
"(",
")",
"for",
"i",
",",
"phase",
"in",
"enumerate",
"(",
"self",
".",
"phases",
")",
":",
"logger",
".",
"info",
"(",
"\"Running Phase {} (Number {})\"",
".",
"format",
"(",
"phase",
".",
"optimizer",
".",
"phase_name",
",",
"i",
")",
")",
"if",
"assert_optimizer_pickle_matches",
":",
"assert_optimizer_pickle_matches_for_phase",
"(",
"phase",
")",
"save_optimizer_for_phase",
"(",
"phase",
")",
"self",
".",
"save_metadata",
"(",
"phase",
",",
"data_name",
")",
"results",
".",
"add",
"(",
"phase",
".",
"phase_name",
",",
"func",
"(",
"phase",
",",
"results",
")",
")",
"return",
"results"
]
| Run the function for each phase in the pipeline.
Parameters
----------
assert_optimizer_pickle_matches
Whether to check that a previously pickled optimizer matches this phase's optimizer
data_name
The name of the data being fit, recorded in the phase metadata
func
A function that takes a phase and prior results, returning results for that phase
Returns
-------
results: ResultsCollection
A collection of results | [
"Run",
"the",
"function",
"for",
"each",
"phase",
"in",
"the",
"pipeline",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L143-L167 | train |
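Sketch of driving a pipeline with run_function; the callback body and its phase.run call are assumptions about the phase API, not shown in this record, and pipeline and data are stand-ins:

def run_phase(phase, prior_results):
    # hypothetical phase API: fit the data using results of earlier phases
    return phase.run(data, prior_results)

results = pipeline.run_function(run_phase, data_name="lens_data_1")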
Riminder/python-riminder-api | riminder/bytesutils.py | strtobytes | def strtobytes(input, encoding):
"""Take a str and transform it into a byte array."""
py_version = sys.version_info[0]
if py_version >= 3:
return _strtobytes_py3(input, encoding)
return _strtobytes_py2(input, encoding) | python | def strtobytes(input, encoding):
"""Take a str and transform it into a byte array."""
py_version = sys.version_info[0]
if py_version >= 3:
return _strtobytes_py3(input, encoding)
return _strtobytes_py2(input, encoding) | [
"def",
"strtobytes",
"(",
"input",
",",
"encoding",
")",
":",
"py_version",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"if",
"py_version",
">=",
"3",
":",
"return",
"_strtobytes_py3",
"(",
"input",
",",
"encoding",
")",
"return",
"_strtobytes_py2",
"(",
"input",
",",
"encoding",
")"
]
| Take a str and transform it into a byte array. | [
"Take",
"a",
"str",
"and",
"transform",
"it",
"into",
"a",
"byte",
"array",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/bytesutils.py#L14-L19 | train |
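Usage sketch for the version-dispatching helper; on Python 3 it delegates to the py3 encoder:

from riminder.bytesutils import strtobytes

encoded = strtobytes("héllo", "utf-8")   # b'h\xc3\xa9llo' on Python 3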
pgxcentre/geneparse | geneparse/index/__main__.py | index_impute2 | def index_impute2(fn):
"""Indexes an IMPUTE2 file.
Args:
fn (str): The name of the IMPUTE2 file.
"""
logger.info("Indexing {} (IMPUTE2)".format(fn))
impute2_index(fn, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ")
logger.info("Index generated") | python | def index_impute2(fn):
"""Indexes an IMPUTE2 file.
Args:
fn (str): The name of the IMPUTE2 file.
"""
logger.info("Indexing {} (IMPUTE2)".format(fn))
impute2_index(fn, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ")
logger.info("Index generated") | [
"def",
"index_impute2",
"(",
"fn",
")",
":",
"logger",
".",
"info",
"(",
"\"Indexing {} (IMPUTE2)\"",
".",
"format",
"(",
"fn",
")",
")",
"impute2_index",
"(",
"fn",
",",
"cols",
"=",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"names",
"=",
"[",
"\"chrom\"",
",",
"\"name\"",
",",
"\"pos\"",
"]",
",",
"sep",
"=",
"\" \"",
")",
"logger",
".",
"info",
"(",
"\"Index generated\"",
")"
]
| Indexes an IMPUTE2 file.
Args:
fn (str): The name of the IMPUTE2 file. | [
"Indexes",
"an",
"IMPUTE2",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/__main__.py#L60-L69 | train |
pgxcentre/geneparse | geneparse/index/__main__.py | index_bgen | def index_bgen(fn, legacy=False):
"""Indexes a BGEN file.
Args:
fn (str): The name of the BGEN file.
legacy (bool): If True, index with the '-with-rowid' flag.
"""
logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
fn, " (legacy mode)" if legacy else "",
))
command = ["bgenix", "-g", fn, "-index"]
if legacy:
command.append("-with-rowid")
try:
logger.info("Executing '{}'".format(" ".join(command)))
subprocess.Popen(command).communicate()
except FileNotFoundError:
logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
sys.exit(1)
logger.info("Index generated") | python | def index_bgen(fn, legacy=False):
"""Indexes a BGEN file.
Args:
fn (str): The name of the BGEN file.
legacy (bool): If True, index with the '-with-rowid' flag.
"""
logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
fn, " (legacy mode)" if legacy else "",
))
command = ["bgenix", "-g", fn, "-index"]
if legacy:
command.append("-with-rowid")
try:
logger.info("Executing '{}'".format(" ".join(command)))
subprocess.Popen(command).communicate()
except FileNotFoundError:
logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
sys.exit(1)
logger.info("Index generated") | [
"def",
"index_bgen",
"(",
"fn",
",",
"legacy",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"\"Indexing {} (BGEN) using 'bgenix'{}\"",
".",
"format",
"(",
"fn",
",",
"\" (legacy mode)\"",
"if",
"legacy",
"else",
"\"\"",
",",
")",
")",
"command",
"=",
"[",
"\"bgenix\"",
",",
"\"-g\"",
",",
"fn",
",",
"\"-index\"",
"]",
"if",
"legacy",
":",
"command",
".",
"append",
"(",
"\"-with-rowid\"",
")",
"try",
":",
"logger",
".",
"info",
"(",
"\"Executing '{}'\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"command",
")",
")",
")",
"subprocess",
".",
"Popen",
"(",
"command",
")",
".",
"communicate",
"(",
")",
"except",
"FileNotFoundError",
":",
"logger",
".",
"error",
"(",
"\"Cannot find 'bgenix', impossible to index {}\"",
".",
"format",
"(",
"fn",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"logger",
".",
"info",
"(",
"\"Index generated\"",
")"
]
| Indexes a BGEN file.
Args:
fn (str): The name of the BGEN file.
legacy (bool): If True, index with the '-with-rowid' flag. | [
"Indexes",
"a",
"BGEN",
"file",
"."
]
| f698f9708af4c7962d384a70a5a14006b1cb7108 | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/__main__.py#L72-L91 | train |
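Sketch of index_bgen, which shells out to the external 'bgenix' tool, so that binary must be on PATH (the file name is illustrative; the commands shown match the code above):

from geneparse.index.__main__ import index_bgen

index_bgen("cohort.bgen")                # runs: bgenix -g cohort.bgen -index
index_bgen("cohort.bgen", legacy=True)   # adds -with-rowid for older readers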
unt-libraries/pyuntl | pyuntl/untl_structure.py | create_untl_xml_subelement | def create_untl_xml_subelement(parent, element, prefix=''):
"""Create a UNTL XML subelement."""
subelement = SubElement(parent, prefix + element.tag)
if element.content is not None:
subelement.text = element.content
if element.qualifier is not None:
subelement.attrib["qualifier"] = element.qualifier
if element.children:
for child in element.children:
SubElement(subelement, prefix + child.tag).text = child.content
else:
subelement.text = element.content
return subelement | python | def create_untl_xml_subelement(parent, element, prefix=''):
"""Create a UNTL XML subelement."""
subelement = SubElement(parent, prefix + element.tag)
if element.content is not None:
subelement.text = element.content
if element.qualifier is not None:
subelement.attrib["qualifier"] = element.qualifier
if element.children:
for child in element.children:
SubElement(subelement, prefix + child.tag).text = child.content
else:
subelement.text = element.content
return subelement | [
"def",
"create_untl_xml_subelement",
"(",
"parent",
",",
"element",
",",
"prefix",
"=",
"''",
")",
":",
"subelement",
"=",
"SubElement",
"(",
"parent",
",",
"prefix",
"+",
"element",
".",
"tag",
")",
"if",
"element",
".",
"content",
"is",
"not",
"None",
":",
"subelement",
".",
"text",
"=",
"element",
".",
"content",
"if",
"element",
".",
"qualifier",
"is",
"not",
"None",
":",
"subelement",
".",
"attrib",
"[",
"\"qualifier\"",
"]",
"=",
"element",
".",
"qualifier",
"if",
"element",
".",
"children",
">",
"0",
":",
"for",
"child",
"in",
"element",
".",
"children",
":",
"SubElement",
"(",
"subelement",
",",
"prefix",
"+",
"child",
".",
"tag",
")",
".",
"text",
"=",
"child",
".",
"content",
"else",
":",
"subelement",
".",
"text",
"=",
"element",
".",
"content",
"return",
"subelement"
]
| Create a UNTL XML subelement. | [
"Create",
"a",
"UNTL",
"XML",
"subelement",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L22-L35 | train |
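Sketch of the XML helper on a minimal stand-in element; real elements come from pyuntl's dispatch tables, and this fake carries just the attributes the helper reads:

from lxml.etree import Element, tostring
from pyuntl.untl_structure import create_untl_xml_subelement

class FakeElement(object):               # stand-in with the attributes the helper reads
    tag = "title"
    content = "An example title"
    qualifier = "officialtitle"
    children = []

root = Element("metadata")
create_untl_xml_subelement(root, FakeElement())
print(tostring(root, pretty_print=True))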
unt-libraries/pyuntl | pyuntl/untl_structure.py | add_missing_children | def add_missing_children(required_children, element_children):
"""Determine if there are elements not in the children
that need to be included as blank elements in the form.
"""
element_tags = [element.tag for element in element_children]
# Loop through the elements that should be in the form.
for contained_element in required_children:
# If the element doesn't exist in the form,
# add the element to the children.
if contained_element not in element_tags:
try:
added_child = PYUNTL_DISPATCH[contained_element](content='')
except:
added_child = PYUNTL_DISPATCH[contained_element]()
element_children.append(added_child)
return element_children | python | def add_missing_children(required_children, element_children):
"""Determine if there are elements not in the children
that need to be included as blank elements in the form.
"""
element_tags = [element.tag for element in element_children]
# Loop through the elements that should be in the form.
for contained_element in required_children:
# If the element doesn't exist in the form,
# add the element to the children.
if contained_element not in element_tags:
try:
added_child = PYUNTL_DISPATCH[contained_element](content='')
except:
added_child = PYUNTL_DISPATCH[contained_element]()
element_children.append(added_child)
return element_children | [
"def",
"add_missing_children",
"(",
"required_children",
",",
"element_children",
")",
":",
"element_tags",
"=",
"[",
"element",
".",
"tag",
"for",
"element",
"in",
"element_children",
"]",
"# Loop through the elements that should be in the form.",
"for",
"contained_element",
"in",
"required_children",
":",
"# If the element doesn't exist in the form,",
"# add the element to the children.",
"if",
"contained_element",
"not",
"in",
"element_tags",
":",
"try",
":",
"added_child",
"=",
"PYUNTL_DISPATCH",
"[",
"contained_element",
"]",
"(",
"content",
"=",
"''",
")",
"except",
":",
"added_child",
"=",
"PYUNTL_DISPATCH",
"[",
"contained_element",
"]",
"(",
")",
"element_children",
".",
"append",
"(",
"added_child",
")",
"return",
"element_children"
]
| Determine if there are elements not in the children
that need to be included as blank elements in the form. | [
"Determine",
"if",
"there",
"are",
"elements",
"not",
"in",
"the",
"children",
"that",
"need",
"to",
"be",
"included",
"as",
"blank",
"elements",
"in",
"the",
"form",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L38-L53 | train |
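Sketch of the gap-filling behaviour; the tags are illustrative, and both the no-argument construction and the availability of these keys in PYUNTL_DISPATCH are assumptions:

existing = [PYUNTL_DISPATCH["creator"]()]          # hypothetical existing child
filled = add_missing_children(["creator", "date"], existing)
print([child.tag for child in filled])             # ['creator', 'date']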
unt-libraries/pyuntl | pyuntl/untl_structure.py | UNTLElement.set_qualifier | def set_qualifier(self, value):
"""Set the qualifier for the element.
Verifies the element is allowed to have a qualifier, and
throws an exception if not.
"""
if self.allows_qualifier:
self.qualifier = value.strip()
else:
raise UNTLStructureException(
'Element "%s" does not allow a qualifier' % (self.tag,)
) | python | def set_qualifier(self, value):
"""Set the qualifier for the element.
Verifies the element is allowed to have a qualifier, and
throws an exception if not.
"""
if self.allows_qualifier:
self.qualifier = value.strip()
else:
raise UNTLStructureException(
'Element "%s" does not allow a qualifier' % (self.tag,)
) | [
"def",
"set_qualifier",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"allows_qualifier",
":",
"self",
".",
"qualifier",
"=",
"value",
".",
"strip",
"(",
")",
"else",
":",
"raise",
"UNTLStructureException",
"(",
"'Element \"%s\" does not allow a qualifier'",
"%",
"(",
"self",
".",
"tag",
",",
")",
")"
]
| Set the qualifier for the element.
Verifies the element is allowed to have a qualifier, and
throws an exception if not. | [
"Set",
"the",
"qualifier",
"for",
"the",
"element",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L85-L96 | train |
unt-libraries/pyuntl | pyuntl/untl_structure.py | UNTLElement.add_form | def add_form(self, **kwargs):
"""Add the form attribute to the UNTL Python object."""
vocabularies = kwargs.get('vocabularies', None)
qualifier = kwargs.get('qualifier', None)
content = kwargs.get('content', None)
parent_tag = kwargs.get('parent_tag', None)
superuser = kwargs.get('superuser', False)
# Element has both the qualifier and content.
if qualifier is not None and content is not None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
qualifier_value=qualifier,
input_value=content,
untl_object=self,
superuser=superuser,
)
# Element just has a qualifier.
elif qualifier is not None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
qualifier_value=qualifier,
untl_object=self,
superuser=superuser,
)
# Element just has content.
elif content is not None:
# If the element is a child element,
# create the form attribute.
if parent_tag is None:
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
input_value=content,
untl_object=self,
superuser=superuser,
)
else:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
input_value=content,
untl_object=self,
parent_tag=parent_tag,
superuser=superuser,
)
# Element has children and no qualifiers or content
# or is blank (not originally in the UNTL record).
else:
# Element is a child element.
if parent_tag is None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
untl_object=self,
superuser=superuser,
)
else:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
untl_object=self,
parent_tag=parent_tag,
superuser=superuser,
) | python | def add_form(self, **kwargs):
"""Add the form attribute to the UNTL Python object."""
vocabularies = kwargs.get('vocabularies', None)
qualifier = kwargs.get('qualifier', None)
content = kwargs.get('content', None)
parent_tag = kwargs.get('parent_tag', None)
superuser = kwargs.get('superuser', False)
# Element has both the qualifier and content.
if qualifier is not None and content is not None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
qualifier_value=qualifier,
input_value=content,
untl_object=self,
superuser=superuser,
)
# Element just has a qualifier.
elif qualifier is not None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
qualifier_value=qualifier,
untl_object=self,
superuser=superuser,
)
# Element just has content.
elif content is not None:
# If the element is a child element,
# create the form attribute.
if parent_tag is None:
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
input_value=content,
untl_object=self,
superuser=superuser,
)
else:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
input_value=content,
untl_object=self,
parent_tag=parent_tag,
superuser=superuser,
)
# Element has children and no qualifiers or content
# or is blank (not originally in the UNTL record).
else:
# Element is a child element.
if parent_tag is None:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
untl_object=self,
superuser=superuser,
)
else:
# Create the form attribute.
self.form = UNTL_FORM_DISPATCH[self.tag](
vocabularies=vocabularies,
untl_object=self,
parent_tag=parent_tag,
superuser=superuser,
) | [
"def",
"add_form",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"vocabularies",
"=",
"kwargs",
".",
"get",
"(",
"'vocabularies'",
",",
"None",
")",
"qualifier",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
",",
"None",
")",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
",",
"None",
")",
"parent_tag",
"=",
"kwargs",
".",
"get",
"(",
"'parent_tag'",
",",
"None",
")",
"superuser",
"=",
"kwargs",
".",
"get",
"(",
"'superuser'",
",",
"False",
")",
"# Element has both the qualifier and content.",
"if",
"qualifier",
"is",
"not",
"None",
"and",
"content",
"is",
"not",
"None",
":",
"# Create the form attribute.",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"qualifier_value",
"=",
"qualifier",
",",
"input_value",
"=",
"content",
",",
"untl_object",
"=",
"self",
",",
"superuser",
"=",
"superuser",
",",
")",
"# Element just has a qualifier.",
"elif",
"qualifier",
"is",
"not",
"None",
":",
"# Create the form attribute.",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"qualifier_value",
"=",
"qualifier",
",",
"untl_object",
"=",
"self",
",",
"superuser",
"=",
"superuser",
",",
")",
"# Element just has content.",
"elif",
"content",
"is",
"not",
"None",
":",
"# If the element is a child element,",
"# create the form attribute.",
"if",
"parent_tag",
"is",
"None",
":",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"input_value",
"=",
"content",
",",
"untl_object",
"=",
"self",
",",
"superuser",
"=",
"superuser",
",",
")",
"else",
":",
"# Create the form attribute.",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"input_value",
"=",
"content",
",",
"untl_object",
"=",
"self",
",",
"parent_tag",
"=",
"parent_tag",
",",
"superuser",
"=",
"superuser",
",",
")",
"# Element has children and no qualifiers or content",
"# or is blank (not originally in the UNTL record).",
"else",
":",
"# Element is a child element.",
"if",
"parent_tag",
"is",
"None",
":",
"# Create the form attribute.",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"untl_object",
"=",
"self",
",",
"superuser",
"=",
"superuser",
",",
")",
"else",
":",
"# Create the form attribute.",
"self",
".",
"form",
"=",
"UNTL_FORM_DISPATCH",
"[",
"self",
".",
"tag",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"untl_object",
"=",
"self",
",",
"parent_tag",
"=",
"parent_tag",
",",
"superuser",
"=",
"superuser",
",",
")"
]
| Add the form attribute to the UNTL Python object. | [
"Add",
"the",
"form",
"attribute",
"to",
"the",
"UNTL",
"Python",
"object",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L127-L191 | train |
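Sketch of attaching a form to a standalone element; vocab_dict is as returned by FormGenerator.get_vocabularies, and the 'title' tag, qualifier value, and constructor kwargs are illustrative assumptions:

element = PYUNTL_DISPATCH["title"](qualifier="officialtitle",
                                   content="An example title")
element.add_form(vocabularies=vocab_dict,
                 qualifier=element.qualifier,
                 content=element.content,
                 superuser=False)
print(element.form)    # a form object built by UNTL_FORM_DISPATCH["title"]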
unt-libraries/pyuntl | pyuntl/untl_structure.py | UNTLElement.record_content_length | def record_content_length(self):
"""Calculate length of record, excluding metadata."""
untldict = py2dict(self)
untldict.pop('meta', None)
return len(str(untldict)) | python | def record_content_length(self):
"""Calculate length of record, excluding metadata."""
untldict = py2dict(self)
untldict.pop('meta', None)
return len(str(untldict)) | [
"def",
"record_content_length",
"(",
"self",
")",
":",
"untldict",
"=",
"py2dict",
"(",
"self",
")",
"untldict",
".",
"pop",
"(",
"'meta'",
",",
"None",
")",
"return",
"len",
"(",
"str",
"(",
"untldict",
")",
")"
]
| Calculate length of record, excluding metadata. | [
"Calculate",
"length",
"of",
"record",
"excluding",
"metadata",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L204-L208 | train |
unt-libraries/pyuntl | pyuntl/untl_structure.py | FormGenerator.create_form_data | def create_form_data(self, **kwargs):
"""Create groupings of form elements."""
# Get the specified keyword arguments.
children = kwargs.get('children', [])
sort_order = kwargs.get('sort_order', None)
solr_response = kwargs.get('solr_response', None)
superuser = kwargs.get('superuser', False)
# Get the vocabularies to pull the qualifiers from.
vocabularies = self.get_vocabularies()
# Loop through all UNTL elements in the Python object.
for element in children:
# Add children that are missing from the form.
element.children = add_missing_children(
element.contained_children,
element.children,
)
# Add the form attribute to the element.
element.add_form(
vocabularies=vocabularies,
qualifier=element.qualifier,
content=element.content,
superuser=superuser,
)
# Element can contain children.
if element.form.has_children:
# If the parent has a qualifier,
# create a representative form element for the parent.
if getattr(element.form, 'qualifier_name', False):
add_parent = PARENT_FORM[element.form.qualifier_name](
content=element.qualifier,
)
# Add the parent to the list of child elements.
element.children.append(add_parent)
# Sort the elements by the index of child sort.
element.children.sort(
key=lambda obj: element.form.child_sort.index(obj.tag)
)
# Loop through the element's children (if it has any).
for child in element.children:
# Add the form attribute to the element.
child.add_form(
vocabularies=vocabularies,
qualifier=child.qualifier,
content=child.content,
parent_tag=element.tag,
superuser=superuser,
)
element_group_dict = {}
# Group related objects together.
for element in children:
# Make meta-hidden its own group.
if element.form.name == 'meta' and element.qualifier == 'hidden':
element_group_dict['hidden'] = [element]
# Element is not meta-hidden.
else:
# Make sure the dictionary key exists.
if element.form.name not in element_group_dict:
element_group_dict[element.form.name] = []
element_group_dict[element.form.name].append(element)
# If the hidden meta element doesn't exist, add it to its own group.
if 'hidden' not in element_group_dict:
hidden_element = PYUNTL_DISPATCH['meta'](
qualifier='hidden',
content='False')
hidden_element.add_form(
vocabularies=vocabularies,
qualifier=hidden_element.qualifier,
content=hidden_element.content,
superuser=superuser,
)
element_group_dict['hidden'] = [hidden_element]
# Create a list of group object elements.
element_list = self.create_form_groupings(
vocabularies,
solr_response,
element_group_dict,
sort_order,
)
# Return the list of UNTL elements with form data added.
return element_list | python | def create_form_data(self, **kwargs):
"""Create groupings of form elements."""
# Get the specified keyword arguments.
children = kwargs.get('children', [])
sort_order = kwargs.get('sort_order', None)
solr_response = kwargs.get('solr_response', None)
superuser = kwargs.get('superuser', False)
# Get the vocabularies to pull the qualifiers from.
vocabularies = self.get_vocabularies()
# Loop through all UNTL elements in the Python object.
for element in children:
# Add children that are missing from the form.
element.children = add_missing_children(
element.contained_children,
element.children,
)
# Add the form attribute to the element.
element.add_form(
vocabularies=vocabularies,
qualifier=element.qualifier,
content=element.content,
superuser=superuser,
)
# Element can contain children.
if element.form.has_children:
# If the parent has a qualifier,
# create a representative form element for the parent.
if getattr(element.form, 'qualifier_name', False):
add_parent = PARENT_FORM[element.form.qualifier_name](
content=element.qualifier,
)
# Add the parent to the list of child elements.
element.children.append(add_parent)
# Sort the elements by the index of child sort.
element.children.sort(
key=lambda obj: element.form.child_sort.index(obj.tag)
)
# Loop through the element's children (if it has any).
for child in element.children:
# Add the form attribute to the element.
child.add_form(
vocabularies=vocabularies,
qualifier=child.qualifier,
content=child.content,
parent_tag=element.tag,
superuser=superuser,
)
element_group_dict = {}
# Group related objects together.
for element in children:
# Make meta-hidden its own group.
if element.form.name == 'meta' and element.qualifier == 'hidden':
element_group_dict['hidden'] = [element]
# Element is not meta-hidden.
else:
# Make sure the dictionary key exists.
if element.form.name not in element_group_dict:
element_group_dict[element.form.name] = []
element_group_dict[element.form.name].append(element)
# If the hidden meta element doesn't exist, add it to its own group.
if 'hidden' not in element_group_dict:
hidden_element = PYUNTL_DISPATCH['meta'](
qualifier='hidden',
content='False')
hidden_element.add_form(
vocabularies=vocabularies,
qualifier=hidden_element.qualifier,
content=hidden_element.content,
superuser=superuser,
)
element_group_dict['hidden'] = [hidden_element]
# Create a list of group object elements.
element_list = self.create_form_groupings(
vocabularies,
solr_response,
element_group_dict,
sort_order,
)
# Return the list of UNTL elements with form data added.
return element_list | [
"def",
"create_form_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the specified keyword arguments.",
"children",
"=",
"kwargs",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"sort_order",
"=",
"kwargs",
".",
"get",
"(",
"'sort_order'",
",",
"None",
")",
"solr_response",
"=",
"kwargs",
".",
"get",
"(",
"'solr_response'",
",",
"None",
")",
"superuser",
"=",
"kwargs",
".",
"get",
"(",
"'superuser'",
",",
"False",
")",
"# Get the vocabularies to pull the qualifiers from.",
"vocabularies",
"=",
"self",
".",
"get_vocabularies",
"(",
")",
"# Loop through all UNTL elements in the Python object.",
"for",
"element",
"in",
"children",
":",
"# Add children that are missing from the form.",
"element",
".",
"children",
"=",
"add_missing_children",
"(",
"element",
".",
"contained_children",
",",
"element",
".",
"children",
",",
")",
"# Add the form attribute to the element.",
"element",
".",
"add_form",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"qualifier",
"=",
"element",
".",
"qualifier",
",",
"content",
"=",
"element",
".",
"content",
",",
"superuser",
"=",
"superuser",
",",
")",
"# Element can contain children.",
"if",
"element",
".",
"form",
".",
"has_children",
":",
"# If the parent has a qualifier,",
"# create a representative form element for the parent.",
"if",
"getattr",
"(",
"element",
".",
"form",
",",
"'qualifier_name'",
",",
"False",
")",
":",
"add_parent",
"=",
"PARENT_FORM",
"[",
"element",
".",
"form",
".",
"qualifier_name",
"]",
"(",
"content",
"=",
"element",
".",
"qualifier",
",",
")",
"# Add the parent to the list of child elements.",
"element",
".",
"children",
".",
"append",
"(",
"add_parent",
")",
"# Sort the elements by the index of child sort.",
"element",
".",
"children",
".",
"sort",
"(",
"key",
"=",
"lambda",
"obj",
":",
"element",
".",
"form",
".",
"child_sort",
".",
"index",
"(",
"obj",
".",
"tag",
")",
")",
"# Loop through the element's children (if it has any).",
"for",
"child",
"in",
"element",
".",
"children",
":",
"# Add the form attribute to the element.",
"child",
".",
"add_form",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"qualifier",
"=",
"child",
".",
"qualifier",
",",
"content",
"=",
"child",
".",
"content",
",",
"parent_tag",
"=",
"element",
".",
"tag",
",",
"superuser",
"=",
"superuser",
",",
")",
"element_group_dict",
"=",
"{",
"}",
"# Group related objects together.",
"for",
"element",
"in",
"children",
":",
"# Make meta-hidden its own group.",
"if",
"element",
".",
"form",
".",
"name",
"==",
"'meta'",
"and",
"element",
".",
"qualifier",
"==",
"'hidden'",
":",
"element_group_dict",
"[",
"'hidden'",
"]",
"=",
"[",
"element",
"]",
"# Element is not meta-hidden.",
"else",
":",
"# Make sure the dictionary key exists.",
"if",
"element",
".",
"form",
".",
"name",
"not",
"in",
"element_group_dict",
":",
"element_group_dict",
"[",
"element",
".",
"form",
".",
"name",
"]",
"=",
"[",
"]",
"element_group_dict",
"[",
"element",
".",
"form",
".",
"name",
"]",
".",
"append",
"(",
"element",
")",
"# If the hidden meta element doesn't exist, add it to its own group.",
"if",
"'hidden'",
"not",
"in",
"element_group_dict",
":",
"hidden_element",
"=",
"PYUNTL_DISPATCH",
"[",
"'meta'",
"]",
"(",
"qualifier",
"=",
"'hidden'",
",",
"content",
"=",
"'False'",
")",
"hidden_element",
".",
"add_form",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"qualifier",
"=",
"hidden_element",
".",
"qualifier",
",",
"content",
"=",
"hidden_element",
".",
"content",
",",
"superuser",
"=",
"superuser",
",",
")",
"element_group_dict",
"[",
"'hidden'",
"]",
"=",
"[",
"hidden_element",
"]",
"# Create a list of group object elements.",
"element_list",
"=",
"self",
".",
"create_form_groupings",
"(",
"vocabularies",
",",
"solr_response",
",",
"element_group_dict",
",",
"sort_order",
",",
")",
"# Return the list of UNTL elements with form data added.",
"return",
"element_list"
]
| Create groupings of form elements. | [
"Create",
"groupings",
"of",
"form",
"elements",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L216-L295 | train |
unt-libraries/pyuntl | pyuntl/untl_structure.py | FormGenerator.create_form_groupings | def create_form_groupings(self,
vocabularies,
solr_response,
element_group_dict,
sort_order):
"""Create a group object from groupings of element objects."""
element_list = []
# Loop through the group dictionary.
for group_name, group_list in element_group_dict.items():
# Create the element group.
element_group = UNTL_GROUP_DISPATCH[group_name](
vocabularies=vocabularies,
solr_response=solr_response,
group_name=group_name,
group_list=group_list,
)
# Loop through the adjustable forms of the group if they exist.
if element_group.adjustable_form is not None:
for adj_name, form_dict in element_group.adjustable_form.items():
# If an item has an adjustable form,
# append it to the adjustable list.
if form_dict['value_py'] is not None:
self.adjustable_items.append(adj_name)
# Append the group to the element group list.
element_list.append(element_group)
# Sort the elements by the index of sort_order pre-ordered list.
element_list.sort(key=lambda obj: sort_order.index(obj.group_name))
return element_list | python | def create_form_groupings(self,
vocabularies,
solr_response,
element_group_dict,
sort_order):
"""Create a group object from groupings of element objects."""
element_list = []
# Loop through the group dictionary.
for group_name, group_list in element_group_dict.items():
# Create the element group.
element_group = UNTL_GROUP_DISPATCH[group_name](
vocabularies=vocabularies,
solr_response=solr_response,
group_name=group_name,
group_list=group_list,
)
# Loop through the adjustable forms of the group if they exist.
if element_group.adjustable_form is not None:
for adj_name, form_dict in element_group.adjustable_form.items():
# If an item has an adjustable form,
# append it to the adjustable list.
if form_dict['value_py'] is not None:
self.adjustable_items.append(adj_name)
# Append the group to the element group list.
element_list.append(element_group)
# Sort the elements by the index of sort_order pre-ordered list.
element_list.sort(key=lambda obj: sort_order.index(obj.group_name))
return element_list | [
"def",
"create_form_groupings",
"(",
"self",
",",
"vocabularies",
",",
"solr_response",
",",
"element_group_dict",
",",
"sort_order",
")",
":",
"element_list",
"=",
"[",
"]",
"# Loop through the group dictionary.",
"for",
"group_name",
",",
"group_list",
"in",
"element_group_dict",
".",
"items",
"(",
")",
":",
"# Create the element group.",
"element_group",
"=",
"UNTL_GROUP_DISPATCH",
"[",
"group_name",
"]",
"(",
"vocabularies",
"=",
"vocabularies",
",",
"solr_response",
"=",
"solr_response",
",",
"group_name",
"=",
"group_name",
",",
"group_list",
"=",
"group_list",
",",
")",
"# Loop through the adjustable forms of the group if they exist.",
"if",
"element_group",
".",
"adjustable_form",
"is",
"not",
"None",
":",
"for",
"adj_name",
",",
"form_dict",
"in",
"element_group",
".",
"adjustable_form",
".",
"items",
"(",
")",
":",
"# If an item has an adjustable form,",
"# append it to the adjustable list.",
"if",
"form_dict",
"[",
"'value_py'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"adjustable_items",
".",
"append",
"(",
"adj_name",
")",
"# Append the group to the element group list.",
"element_list",
".",
"append",
"(",
"element_group",
")",
"# Sort the elements by the index of sort_order pre-ordered list.",
"element_list",
".",
"sort",
"(",
"key",
"=",
"lambda",
"obj",
":",
"sort_order",
".",
"index",
"(",
"obj",
".",
"group_name",
")",
")",
"return",
"element_list"
]
| Create a group object from groupings of element objects. | [
"Create",
"a",
"group",
"object",
"from",
"groupings",
"of",
"element",
"objects",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L297-L324 | train |
unt-libraries/pyuntl | pyuntl/untl_structure.py | FormGenerator.get_vocabularies | def get_vocabularies(self):
"""Get the vocabularies to pull the qualifiers from."""
# Timeout in seconds.
timeout = 15
socket.setdefaulttimeout(timeout)
# Create the ordered vocabulary URL.
vocab_url = VOCABULARIES_URL.replace('all', 'all-verbose')
# Request the vocabularies dictionary.
try:
vocab_dict = eval(urllib2.urlopen(vocab_url).read())
except:
raise UNTLStructureException('Could not retrieve the vocabularies')
return vocab_dict | python | def get_vocabularies(self):
"""Get the vocabularies to pull the qualifiers from."""
# Timeout in seconds.
timeout = 15
socket.setdefaulttimeout(timeout)
# Create the ordered vocabulary URL.
vocab_url = VOCABULARIES_URL.replace('all', 'all-verbose')
# Request the vocabularies dictionary.
try:
vocab_dict = eval(urllib2.urlopen(vocab_url).read())
except:
raise UNTLStructureException('Could not retrieve the vocabularies')
return vocab_dict | [
"def",
"get_vocabularies",
"(",
"self",
")",
":",
"# Timeout in seconds.",
"timeout",
"=",
"15",
"socket",
".",
"setdefaulttimeout",
"(",
"timeout",
")",
"# Create the ordered vocabulary URL.",
"vocab_url",
"=",
"VOCABULARIES_URL",
".",
"replace",
"(",
"'all'",
",",
"'all-verbose'",
")",
"# Request the vocabularies dictionary.",
"try",
":",
"vocab_dict",
"=",
"eval",
"(",
"urllib2",
".",
"urlopen",
"(",
"vocab_url",
")",
".",
"read",
"(",
")",
")",
"except",
":",
"raise",
"UNTLStructureException",
"(",
"'Could not retrieve the vocabularies'",
")",
"return",
"vocab_dict"
]
| Get the vocabularies to pull the qualifiers from. | [
"Get",
"the",
"vocabularies",
"to",
"pull",
"the",
"qualifiers",
"from",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L326-L338 | train |
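
The eval() call in get_vocabularies executes whatever the vocabularies endpoint sends back, and the bare except hides the real failure. A minimal sketch of a safer fetch, assuming the endpoint can serve JSON (Python 2 stdlib to match the code above; RuntimeError stands in for the module's UNTLStructureException):

import json
import socket
import urllib2

def get_vocabularies_json(vocab_url, timeout=15):
    # json.loads parses the payload without executing it, unlike eval().
    socket.setdefaulttimeout(timeout)
    try:
        raw = urllib2.urlopen(vocab_url).read()
        return json.loads(raw)
    except (urllib2.URLError, ValueError) as err:
        raise RuntimeError('Could not retrieve the vocabularies: %s' % err)
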
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.create_xml_string | def create_xml_string(self):
"""Create a UNTL document in a string from a UNTL metadata
root object.
untl_xml_string = metadata_root_object.create_xml_string()
"""
root = self.create_xml()
xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root, pretty_print=True
)
return xml | python | def create_xml_string(self):
"""Create a UNTL document in a string from a UNTL metadata
root object.
untl_xml_string = metadata_root_object.create_xml_string()
"""
root = self.create_xml()
xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root, pretty_print=True
)
return xml | [
"def",
"create_xml_string",
"(",
"self",
")",
":",
"root",
"=",
"self",
".",
"create_xml",
"(",
")",
"xml",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'",
"+",
"tostring",
"(",
"root",
",",
"pretty_print",
"=",
"True",
")",
"return",
"xml"
]
| Create a UNTL document in a string from a UNTL metadata
root object.
untl_xml_string = metadata_root_object.create_xml_string() | [
"Create",
"a",
"UNTL",
"document",
"in",
"a",
"string",
"from",
"a",
"UNTL",
"metadata",
"root",
"object",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L358-L369 | train |
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.create_xml | def create_xml(self, useNamespace=False):
"""Create an ElementTree representation of the object."""
UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
UNTL = '{%s}' % UNTL_NAMESPACE
NSMAP = {'untl': UNTL_NAMESPACE}
if useNamespace:
root = Element(UNTL + self.tag, nsmap=NSMAP)
else:
root = Element(self.tag)
# Sort the elements by the index of
# UNTL_XML_ORDER pre-ordered list.
self.sort_untl(UNTL_XML_ORDER)
# Create an XML structure from field list.
for element in self.children:
if useNamespace:
create_untl_xml_subelement(root, element, UNTL)
else:
create_untl_xml_subelement(root, element)
return root | python | def create_xml(self, useNamespace=False):
"""Create an ElementTree representation of the object."""
UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
UNTL = '{%s}' % UNTL_NAMESPACE
NSMAP = {'untl': UNTL_NAMESPACE}
if useNamespace:
root = Element(UNTL + self.tag, nsmap=NSMAP)
else:
root = Element(self.tag)
# Sort the elements by the index of
# UNTL_XML_ORDER pre-ordered list.
self.sort_untl(UNTL_XML_ORDER)
# Create an XML structure from field list.
for element in self.children:
if useNamespace:
create_untl_xml_subelement(root, element, UNTL)
else:
create_untl_xml_subelement(root, element)
return root | [
"def",
"create_xml",
"(",
"self",
",",
"useNamespace",
"=",
"False",
")",
":",
"UNTL_NAMESPACE",
"=",
"'http://digital2.library.unt.edu/untl/'",
"UNTL",
"=",
"'{%s}'",
"%",
"UNTL_NAMESPACE",
"NSMAP",
"=",
"{",
"'untl'",
":",
"UNTL_NAMESPACE",
"}",
"if",
"useNamespace",
":",
"root",
"=",
"Element",
"(",
"UNTL",
"+",
"self",
".",
"tag",
",",
"nsmap",
"=",
"NSMAP",
")",
"else",
":",
"root",
"=",
"Element",
"(",
"self",
".",
"tag",
")",
"# Sort the elements by the index of",
"# UNTL_XML_ORDER pre-ordered list.",
"self",
".",
"sort_untl",
"(",
"UNTL_XML_ORDER",
")",
"# Create an XML structure from field list.",
"for",
"element",
"in",
"self",
".",
"children",
":",
"if",
"useNamespace",
":",
"create_untl_xml_subelement",
"(",
"root",
",",
"element",
",",
"UNTL",
")",
"else",
":",
"create_untl_xml_subelement",
"(",
"root",
",",
"element",
")",
"return",
"root"
]
| Create an ElementTree representation of the object. | [
"Create",
"an",
"ElementTree",
"representation",
"of",
"the",
"object",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L371-L392 | train |
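
The nsmap branch above is standard lxml usage: a Clark-notation tag '{namespace}tag' plus an nsmap that binds the prefix. A runnable sketch of the same pattern (the 'metadata' tag and title value are illustrative, not taken from the library):

from lxml.etree import Element, SubElement, tostring

UNTL_NAMESPACE = 'http://digital2.library.unt.edu/untl/'
UNTL = '{%s}' % UNTL_NAMESPACE

# The root carries the prefix binding; children reuse the Clark-notation tag.
root = Element(UNTL + 'metadata', nsmap={'untl': UNTL_NAMESPACE})
SubElement(root, UNTL + 'title').text = 'Example record'
print(tostring(root, pretty_print=True))
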
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.create_element_dict | def create_element_dict(self):
"""Convert a UNTL Python object into a UNTL Python dictionary."""
untl_dict = {}
# Loop through all UNTL elements in the Python object.
for element in self.children:
# If an entry for the element list hasn't been made in the
# dictionary, start an empty element list.
if element.tag not in untl_dict:
untl_dict[element.tag] = []
# Create a dictionary to put the element into.
# Add any qualifier.
element_dict = {}
if element.qualifier is not None:
element_dict['qualifier'] = element.qualifier
# Add any children that have content.
if len(element.contained_children) > 0:
child_dict = {}
for child in element.children:
if child.content is not None:
child_dict[child.tag] = child.content
# Set the element's content as the dictionary
# of children elements.
element_dict['content'] = child_dict
# The element has content, but no children.
elif element.content is not None:
element_dict['content'] = element.content
# Append the dictionary element to the element list.
untl_dict[element.tag].append(element_dict)
return untl_dict | python | def create_element_dict(self):
"""Convert a UNTL Python object into a UNTL Python dictionary."""
untl_dict = {}
# Loop through all UNTL elements in the Python object.
for element in self.children:
# If an entry for the element list hasn't been made in the
# dictionary, start an empty element list.
if element.tag not in untl_dict:
untl_dict[element.tag] = []
# Create a dictionary to put the element into.
# Add any qualifier.
element_dict = {}
if element.qualifier is not None:
element_dict['qualifier'] = element.qualifier
# Add any children that have content.
if len(element.contained_children) > 0:
child_dict = {}
for child in element.children:
if child.content is not None:
child_dict[child.tag] = child.content
# Set the element's content as the dictionary
# of children elements.
element_dict['content'] = child_dict
# The element has content, but no children.
elif element.content is not None:
element_dict['content'] = element.content
# Append the dictionary element to the element list.
untl_dict[element.tag].append(element_dict)
return untl_dict | [
"def",
"create_element_dict",
"(",
"self",
")",
":",
"untl_dict",
"=",
"{",
"}",
"# Loop through all UNTL elements in the Python object.",
"for",
"element",
"in",
"self",
".",
"children",
":",
"# If an entry for the element list hasn't been made in the",
"# dictionary, start an empty element list.",
"if",
"element",
".",
"tag",
"not",
"in",
"untl_dict",
":",
"untl_dict",
"[",
"element",
".",
"tag",
"]",
"=",
"[",
"]",
"# Create a dictionary to put the element into.",
"# Add any qualifier.",
"element_dict",
"=",
"{",
"}",
"if",
"element",
".",
"qualifier",
"is",
"not",
"None",
":",
"element_dict",
"[",
"'qualifier'",
"]",
"=",
"element",
".",
"qualifier",
"# Add any children that have content.",
"if",
"len",
"(",
"element",
".",
"contained_children",
")",
">",
"0",
":",
"child_dict",
"=",
"{",
"}",
"for",
"child",
"in",
"element",
".",
"children",
":",
"if",
"child",
".",
"content",
"is",
"not",
"None",
":",
"child_dict",
"[",
"child",
".",
"tag",
"]",
"=",
"child",
".",
"content",
"# Set the element's content as the dictionary",
"# of children elements.",
"element_dict",
"[",
"'content'",
"]",
"=",
"child_dict",
"# The element has content, but no children.",
"elif",
"element",
".",
"content",
"is",
"not",
"None",
":",
"element_dict",
"[",
"'content'",
"]",
"=",
"element",
".",
"content",
"# Append the dictionary element to the element list.",
"untl_dict",
"[",
"element",
".",
"tag",
"]",
".",
"append",
"(",
"element_dict",
")",
"return",
"untl_dict"
]
| Convert a UNTL Python object into a UNTL Python dictionary. | [
"Convert",
"a",
"UNTL",
"Python",
"object",
"into",
"a",
"UNTL",
"Python",
"dictionary",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L394-L423 | train |
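
The dictionary create_element_dict builds maps each tag to a list of {'qualifier', 'content'} entries, where 'content' is either a plain string or a dict of child tags. A hand-written example of that shape (all values hypothetical):

untl_dict = {
    'title': [
        {'qualifier': 'officialtitle', 'content': 'An Example Title'},
    ],
    'creator': [
        # An element with children gets a nested dict as its content.
        {'qualifier': 'aut', 'content': {'type': 'per', 'name': 'Doe, Jane'}},
    ],
}
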
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.create_xml_file | def create_xml_file(self, untl_filename):
"""Create a UNTL file.
Writes file to supplied file path.
"""
try:
f = open(untl_filename, 'w')
f.write(self.create_xml_string().encode('utf-8'))
f.close()
except:
raise UNTLStructureException(
'Failed to create UNTL XML file. File: %s' % (untl_filename)
) | python | def create_xml_file(self, untl_filename):
"""Create a UNTL file.
Writes file to supplied file path.
"""
try:
f = open(untl_filename, 'w')
f.write(self.create_xml_string().encode('utf-8'))
f.close()
except:
raise UNTLStructureException(
'Failed to create UNTL XML file. File: %s' % (untl_filename)
) | [
"def",
"create_xml_file",
"(",
"self",
",",
"untl_filename",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"untl_filename",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"self",
".",
"create_xml_string",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"except",
":",
"raise",
"UNTLStructureException",
"(",
"'Failed to create UNTL XML file. File: %s'",
"%",
"(",
"untl_filename",
")",
")"
]
| Create a UNTL file.
Writes file to supplied file path. | [
"Create",
"a",
"UNTL",
"file",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L425-L437 | train |
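
The open/write/close sequence above leaks the file handle if write() raises, and the bare except swallows the cause. A sketch of the same write with a context manager (Python 3 style, so the encoded bytes go to a binary-mode file; RuntimeError again stands in for UNTLStructureException):

def write_untl_file(metadata, untl_filename):
    # 'wb' because create_xml_string().encode('utf-8') yields bytes.
    try:
        with open(untl_filename, 'wb') as f:
            f.write(metadata.create_xml_string().encode('utf-8'))
    except (IOError, OSError) as err:
        raise RuntimeError(
            'Failed to create UNTL XML file. File: %s (%s)' % (untl_filename, err))
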
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.sort_untl | def sort_untl(self, sort_structure):
"""Sort the UNTL Python object by the index
of a sort structure pre-ordered list.
"""
self.children.sort(key=lambda obj: sort_structure.index(obj.tag)) | python | def sort_untl(self, sort_structure):
"""Sort the UNTL Python object by the index
of a sort structure pre-ordered list.
"""
self.children.sort(key=lambda obj: sort_structure.index(obj.tag)) | [
"def",
"sort_untl",
"(",
"self",
",",
"sort_structure",
")",
":",
"self",
".",
"children",
".",
"sort",
"(",
"key",
"=",
"lambda",
"obj",
":",
"sort_structure",
".",
"index",
"(",
"obj",
".",
"tag",
")",
")"
]
| Sort the UNTL Python object by the index
of a sort structure pre-ordered list. | [
"Sort",
"the",
"UNTL",
"Python",
"object",
"by",
"the",
"index",
"of",
"a",
"sort",
"structure",
"pre",
"-",
"ordered",
"list",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L439-L443 | train |
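
sort() with a list.index key raises ValueError as soon as a child's tag is missing from the pre-ordered list. A tolerant variant of the same idea, which pushes unknown tags to the end instead of failing:

def sort_by_known_order(children, sort_structure):
    def key(obj):
        try:
            return sort_structure.index(obj.tag)
        except ValueError:
            return len(sort_structure)  # unknown tags sort last
    children.sort(key=key)
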
unt-libraries/pyuntl | pyuntl/untl_structure.py | Metadata.generate_form_data | def generate_form_data(self, **kwargs):
"""Create a form dictionary with the key being the element name
and the value being a list of form element objects.
"""
# Add elements that are missing from the form.
self.children = add_missing_children(
self.contained_children,
self.children
)
# Add children to the keyword arguments.
kwargs['children'] = self.children
# Create the form object.
return FormGenerator(**kwargs) | python | def generate_form_data(self, **kwargs):
"""Create a form dictionary with the key being the element name
and the value being a list of form element objects.
"""
# Add elements that are missing from the form.
self.children = add_missing_children(
self.contained_children,
self.children
)
# Add children to the keyword arguments.
kwargs['children'] = self.children
# Create the form object.
return FormGenerator(**kwargs) | [
"def",
"generate_form_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Add elements that are missing from the form.",
"self",
".",
"children",
"=",
"add_missing_children",
"(",
"self",
".",
"contained_children",
",",
"self",
".",
"children",
")",
"# Add children to the keyword arguments.",
"kwargs",
"[",
"'children'",
"]",
"=",
"self",
".",
"children",
"# Create the form object.",
"return",
"FormGenerator",
"(",
"*",
"*",
"kwargs",
")"
]
| Create a form dictionary with the key being the element name
and the value being a list of form element objects. | [
"Create",
"a",
"form",
"dictionary",
"with",
"the",
"key",
"being",
"the",
"element",
"name",
"and",
"the",
"value",
"being",
"a",
"list",
"of",
"form",
"element",
"objects",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/untl_structure.py#L449-L461 | train |
unt-libraries/pyuntl | pyuntl/etd_ms_structure.py | contributor_director | def contributor_director(**kwargs):
"""Define the expanded qualifier name."""
if kwargs.get('qualifier') in ETD_MS_CONTRIBUTOR_EXPANSION:
# Return the element object.
return ETD_MSContributor(
role=ETD_MS_CONTRIBUTOR_EXPANSION[kwargs.get('qualifier')],
**kwargs
)
else:
return None | python | def contributor_director(**kwargs):
"""Define the expanded qualifier name."""
if kwargs.get('qualifier') in ETD_MS_CONTRIBUTOR_EXPANSION:
# Return the element object.
return ETD_MSContributor(
role=ETD_MS_CONTRIBUTOR_EXPANSION[kwargs.get('qualifier')],
**kwargs
)
else:
return None | [
"def",
"contributor_director",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
"in",
"ETD_MS_CONTRIBUTOR_EXPANSION",
":",
"# Return the element object.",
"return",
"ETD_MSContributor",
"(",
"role",
"=",
"ETD_MS_CONTRIBUTOR_EXPANSION",
"[",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"None"
]
| Define the expanded qualifier name. | [
"Define",
"the",
"expanded",
"qualifier",
"name",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L211-L220 | train |
unt-libraries/pyuntl | pyuntl/etd_ms_structure.py | date_director | def date_director(**kwargs):
"""Direct which class should be used based on the date qualifier
or if the date should be converted at all.
"""
# If the date is a creation date, return the element object.
if kwargs.get('qualifier') == 'creation':
return ETD_MSDate(content=kwargs.get('content').strip())
elif kwargs.get('qualifier') != 'digitized':
# Return the element object.
return ETD_MSDate(content=kwargs.get('content').strip())
else:
return None | python | def date_director(**kwargs):
"""Direct which class should be used based on the date qualifier
or if the date should be converted at all.
"""
# If the date is a creation date, return the element object.
if kwargs.get('qualifier') == 'creation':
return ETD_MSDate(content=kwargs.get('content').strip())
elif kwargs.get('qualifier') != 'digitized':
# Return the element object.
return ETD_MSDate(content=kwargs.get('content').strip())
else:
return None | [
"def",
"date_director",
"(",
"*",
"*",
"kwargs",
")",
":",
"# If the date is a creation date, return the element object.",
"if",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
"==",
"'creation'",
":",
"return",
"ETD_MSDate",
"(",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
")",
".",
"strip",
"(",
")",
")",
"elif",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
"!=",
"'digitized'",
":",
"# Return the element object.",
"return",
"ETD_MSDate",
"(",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
")",
".",
"strip",
"(",
")",
")",
"else",
":",
"return",
"None"
]
| Direct which class should be used based on the date qualifier
or if the date should be converted at all. | [
"Direct",
"which",
"class",
"should",
"be",
"used",
"based",
"on",
"the",
"date",
"qualifier",
"or",
"if",
"the",
"date",
"should",
"be",
"converted",
"at",
"all",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L234-L245 | train |
unt-libraries/pyuntl | pyuntl/etd_ms_structure.py | subject_director | def subject_director(**kwargs):
"""Direct how to handle a subject element."""
if kwargs.get('qualifier') not in ['KWD', '']:
return ETD_MSSubject(scheme=kwargs.get('qualifier'), **kwargs)
else:
return ETD_MSSubject(content=kwargs.get('content')) | python | def subject_director(**kwargs):
"""Direct how to handle a subject element."""
if kwargs.get('qualifier') not in ['KWD', '']:
return ETD_MSSubject(scheme=kwargs.get('qualifier'), **kwargs)
else:
return ETD_MSSubject(content=kwargs.get('content')) | [
"def",
"subject_director",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
"not",
"in",
"[",
"'KWD'",
",",
"''",
"]",
":",
"return",
"ETD_MSSubject",
"(",
"scheme",
"=",
"kwargs",
".",
"get",
"(",
"'qualifier'",
")",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"ETD_MSSubject",
"(",
"content",
"=",
"kwargs",
".",
"get",
"(",
"'content'",
")",
")"
]
| Direct how to handle a subject element. | [
"Direct",
"how",
"to",
"handle",
"a",
"subject",
"element",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L262-L267 | train |
unt-libraries/pyuntl | pyuntl/etd_ms_structure.py | ETD_MSElement.get_child_content | def get_child_content(self, children, element_name):
"""Get the requested element content from a list of children."""
# Loop through the children and get the specified element.
for child in children:
# If the child is the requested element, return its content.
if child.tag == element_name:
return child.content
return '' | python | def get_child_content(self, children, element_name):
"""Get the requested element content from a list of children."""
# Loop through the children and get the specified element.
for child in children:
# If the child is the requested element, return its content.
if child.tag == element_name:
return child.content
return '' | [
"def",
"get_child_content",
"(",
"self",
",",
"children",
",",
"element_name",
")",
":",
"# Loop through the children and get the specified element.",
"for",
"child",
"in",
"children",
":",
"# If the child is the requested element, return its content.",
"if",
"child",
".",
"tag",
"==",
"element_name",
":",
"return",
"child",
".",
"content",
"return",
"''"
]
| Get the requested element content from a list of children. | [
"Get",
"the",
"requested",
"element",
"content",
"from",
"a",
"list",
"of",
"children",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/etd_ms_structure.py#L69-L76 | train |
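
The loop above is a first-match-or-default scan, which also fits in a single next() call over a generator. A self-contained sketch with stand-in child objects:

from collections import namedtuple

Child = namedtuple('Child', ['tag', 'content'])

def get_child_content(children, element_name):
    # First child whose tag matches, or '' when none does.
    return next((c.content for c in children if c.tag == element_name), '')

children = [Child('type', 'per'), Child('name', 'Doe, Jane')]
assert get_child_content(children, 'name') == 'Doe, Jane'
assert get_child_content(children, 'missing') == ''
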
geophysics-ubonn/crtomo_tools | src/td_residuals.py | shiftedColorMap | def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap:
The matplotlib colormap to be altered
start:
        Offset from lowest point in the colormap's range. Defaults to 0.0 (no
        lower offset). Should be between 0.0 and `midpoint`.
    midpoint:
        The new center of the colormap. Defaults to 0.5 (no shift). Should be
        between 0.0 and 1.0. In general, this should be 1 - vmax/(vmax +
        abs(vmin)). For example, if your data ranges from -15.0 to +5.0 and you
        want the center of the colormap at 0.0, `midpoint` should be set to
        1 - 5/(5 + 15), or 0.75.
    stop:
        Offset from highest point in the colormap's range. Defaults to 1.0 (no
        upper offset). Should be between `midpoint` and 1.0.
'''
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap | python | def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap:
The matplotlib colormap to be altered
start:
        Offset from lowest point in the colormap's range. Defaults to 0.0 (no
        lower offset). Should be between 0.0 and `midpoint`.
    midpoint:
        The new center of the colormap. Defaults to 0.5 (no shift). Should be
        between 0.0 and 1.0. In general, this should be 1 - vmax/(vmax +
        abs(vmin)). For example, if your data ranges from -15.0 to +5.0 and you
        want the center of the colormap at 0.0, `midpoint` should be set to
        1 - 5/(5 + 15), or 0.75.
    stop:
        Offset from highest point in the colormap's range. Defaults to 1.0 (no
        upper offset). Should be between `midpoint` and 1.0.
'''
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap | [
"def",
"shiftedColorMap",
"(",
"cmap",
",",
"start",
"=",
"0",
",",
"midpoint",
"=",
"0.5",
",",
"stop",
"=",
"1.0",
",",
"name",
"=",
"'shiftedcmap'",
")",
":",
"cdict",
"=",
"{",
"'red'",
":",
"[",
"]",
",",
"'green'",
":",
"[",
"]",
",",
"'blue'",
":",
"[",
"]",
",",
"'alpha'",
":",
"[",
"]",
"}",
"# regular index to compute the colors",
"reg_index",
"=",
"np",
".",
"linspace",
"(",
"start",
",",
"stop",
",",
"257",
")",
"# shifted index to match the data",
"shift_index",
"=",
"np",
".",
"hstack",
"(",
"[",
"np",
".",
"linspace",
"(",
"0.0",
",",
"midpoint",
",",
"128",
",",
"endpoint",
"=",
"False",
")",
",",
"np",
".",
"linspace",
"(",
"midpoint",
",",
"1.0",
",",
"129",
",",
"endpoint",
"=",
"True",
")",
"]",
")",
"for",
"ri",
",",
"si",
"in",
"zip",
"(",
"reg_index",
",",
"shift_index",
")",
":",
"r",
",",
"g",
",",
"b",
",",
"a",
"=",
"cmap",
"(",
"ri",
")",
"cdict",
"[",
"'red'",
"]",
".",
"append",
"(",
"(",
"si",
",",
"r",
",",
"r",
")",
")",
"cdict",
"[",
"'green'",
"]",
".",
"append",
"(",
"(",
"si",
",",
"g",
",",
"g",
")",
")",
"cdict",
"[",
"'blue'",
"]",
".",
"append",
"(",
"(",
"si",
",",
"b",
",",
"b",
")",
")",
"cdict",
"[",
"'alpha'",
"]",
".",
"append",
"(",
"(",
"si",
",",
"a",
",",
"a",
")",
")",
"newcmap",
"=",
"mpl",
".",
"colors",
".",
"LinearSegmentedColormap",
"(",
"name",
",",
"cdict",
")",
"plt",
".",
"register_cmap",
"(",
"cmap",
"=",
"newcmap",
")",
"return",
"newcmap"
]
| Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and you want the
middle of the colormap's dynamic range to be at zero
Parameters
----------
cmap:
The matplotlib colormap to be altered
start:
    Offset from lowest point in the colormap's range. Defaults to 0.0 (no
    lower offset). Should be between 0.0 and `midpoint`.
midpoint:
    The new center of the colormap. Defaults to 0.5 (no shift). Should be
    between 0.0 and 1.0. In general, this should be 1 - vmax/(vmax +
    abs(vmin)). For example, if your data ranges from -15.0 to +5.0 and you
    want the center of the colormap at 0.0, `midpoint` should be set to
    1 - 5/(5 + 15), or 0.75.
stop:
    Offset from highest point in the colormap's range. Defaults to 1.0 (no
    upper offset). Should be between `midpoint` and 1.0. | [
"Function",
"to",
"offset",
"the",
"center",
"of",
"a",
"colormap",
".",
"Useful",
"for",
"data",
"with",
"a",
"negative",
"min",
"and",
"positive",
"max",
"and",
"you",
"want",
"the",
"middle",
"of",
"the",
"colormap",
"s",
"dynamic",
"range",
"to",
"be",
"at",
"zero"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_residuals.py#L10-L60 | train |
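
A usage sketch for the docstring's own numbers: data spanning -15.0 to +5.0 with the color center pinned at 0.0, so midpoint = 1 - 5/(5 + 15) = 0.75 (assumes shiftedColorMap from this module is in scope):

import matplotlib.pyplot as plt
import numpy as np

data = np.random.uniform(-15.0, 5.0, size=(20, 20))
# Center the diverging colormap on 0.0 for the asymmetric range [-15, 5].
shifted = shiftedColorMap(plt.cm.RdBu_r, midpoint=0.75, name='shifted_rdbu')
plt.imshow(data, cmap=shifted, vmin=-15.0, vmax=5.0)
plt.colorbar()
plt.show()
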
geophysics-ubonn/crtomo_tools | src/td_residuals.py | read_lastmodfile | def read_lastmodfile(directory):
"""
Return the number of the final inversion result.
"""
filename = '{0}/exe/inv.lastmod'.format(directory)
# filename HAS to exist. Otherwise the inversion was not finished
if(not os.path.isfile(filename)):
return None
linestring = open(filename, 'r').readline().strip()
linestring = linestring.replace("\n", '')
linestring = linestring.replace(".mag", '')
linestring = linestring.replace("../inv/rho", '')
return linestring | python | def read_lastmodfile(directory):
"""
Return the number of the final inversion result.
"""
filename = '{0}/exe/inv.lastmod'.format(directory)
# filename HAS to exist. Otherwise the inversion was not finished
if(not os.path.isfile(filename)):
return None
linestring = open(filename, 'r').readline().strip()
linestring = linestring.replace("\n", '')
linestring = linestring.replace(".mag", '')
linestring = linestring.replace("../inv/rho", '')
return linestring | [
"def",
"read_lastmodfile",
"(",
"directory",
")",
":",
"filename",
"=",
"'{0}/exe/inv.lastmod'",
".",
"format",
"(",
"directory",
")",
"# filename HAS to exist. Otherwise the inversion was not finished",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
")",
":",
"return",
"None",
"linestring",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\"\\n\"",
",",
"''",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\".mag\"",
",",
"''",
")",
"linestring",
"=",
"linestring",
".",
"replace",
"(",
"\"../inv/rho\"",
",",
"''",
")",
"return",
"linestring"
]
| Return the number of the final inversion result. | [
"Return",
"the",
"number",
"of",
"the",
"final",
"inversion",
"result",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_residuals.py#L63-L76 | train |
Riminder/python-riminder-api | riminder/webhook.py | Webhook.setHandler | def setHandler(self, event_name, callback):
"""Set an handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
    if not callable(callback):
        raise TypeError('{} is not callable'.format(callback))
self.handlers[event_name] = callback | python | def setHandler(self, event_name, callback):
"""Set an handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
    if not callable(callback):
        raise TypeError('{} is not callable'.format(callback))
self.handlers[event_name] = callback | [
"def",
"setHandler",
"(",
"self",
",",
"event_name",
",",
"callback",
")",
":",
"if",
"event_name",
"not",
"in",
"self",
".",
"handlers",
":",
"raise",
"ValueError",
"(",
"'{} is not a valid event'",
".",
"format",
"(",
"event_name",
")",
")",
"if",
"callable",
"(",
"event_name",
")",
":",
"raise",
"TypeError",
"(",
"'{} is not callable'",
".",
"format",
"(",
"callback",
")",
")",
"self",
".",
"handlers",
"[",
"event_name",
"]",
"=",
"callback"
]
| Set a handler for given event. | [
"Set",
"an",
"handler",
"for",
"given",
"event",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L59-L65 | train |
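
The two guards above (a registered event name, a callable callback) are easy to exercise in isolation. A self-contained sketch of the same checks; the event name below is hypothetical, since valid names are whatever keys the Webhook's handlers dict holds:

def set_handler(handlers, event_name, callback):
    if event_name not in handlers:
        raise ValueError('{} is not a valid event'.format(event_name))
    if not callable(callback):
        raise TypeError('{} is not callable'.format(callback))
    handlers[event_name] = callback

handlers = {'profile.parse.success': None}  # hypothetical event name
set_handler(handlers, 'profile.parse.success', lambda *args: None)
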
Riminder/python-riminder-api | riminder/webhook.py | Webhook.isHandlerPresent | def isHandlerPresent(self, event_name):
"""Check if an event has an handler."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
return self.handlers[event_name] is not None | python | def isHandlerPresent(self, event_name):
"""Check if an event has an handler."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
return self.handlers[event_name] is not None | [
"def",
"isHandlerPresent",
"(",
"self",
",",
"event_name",
")",
":",
"if",
"event_name",
"not",
"in",
"self",
".",
"handlers",
":",
"raise",
"ValueError",
"(",
"'{} is not a valid event'",
".",
"format",
"(",
"event_name",
")",
")",
"return",
"self",
".",
"handlers",
"[",
"event_name",
"]",
"is",
"not",
"None"
]
| Check if an event has a handler. | [
"Check",
"if",
"an",
"event",
"has",
"an",
"handler",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L67-L71 | train |
Riminder/python-riminder-api | riminder/webhook.py | Webhook.removeHandler | def removeHandler(self, event_name):
"""Remove handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
self.handlers[event_name] = None | python | def removeHandler(self, event_name):
"""Remove handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
self.handlers[event_name] = None | [
"def",
"removeHandler",
"(",
"self",
",",
"event_name",
")",
":",
"if",
"event_name",
"not",
"in",
"self",
".",
"handlers",
":",
"raise",
"ValueError",
"(",
"'{} is not a valid event'",
".",
"format",
"(",
"event_name",
")",
")",
"self",
".",
"handlers",
"[",
"event_name",
"]",
"=",
"None"
]
| Remove handler for given event. | [
"Remove",
"handler",
"for",
"given",
"event",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L73-L77 | train |
Riminder/python-riminder-api | riminder/webhook.py | Webhook._get_fct_number_of_arg | def _get_fct_number_of_arg(self, fct):
"""Get the number of argument of a fuction."""
py_version = sys.version_info[0]
if py_version >= 3:
return len(inspect.signature(fct).parameters)
return len(inspect.getargspec(fct)[0]) | python | def _get_fct_number_of_arg(self, fct):
"""Get the number of argument of a fuction."""
py_version = sys.version_info[0]
if py_version >= 3:
return len(inspect.signature(fct).parameters)
return len(inspect.getargspec(fct)[0]) | [
"def",
"_get_fct_number_of_arg",
"(",
"self",
",",
"fct",
")",
":",
"py_version",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"if",
"py_version",
">=",
"3",
":",
"return",
"len",
"(",
"inspect",
".",
"signature",
"(",
"fct",
")",
".",
"parameters",
")",
"return",
"len",
"(",
"inspect",
".",
"getargspec",
"(",
"fct",
")",
"[",
"0",
"]",
")"
]
| Get the number of arguments of a function. | [
"Get",
"the",
"number",
"of",
"argument",
"of",
"a",
"fuction",
"."
]
| 01279f0ece08cf3d1dd45f76de6d9edf7fafec90 | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L95-L100 | train |
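
inspect.getargspec was deprecated for years and removed in Python 3.11, so new code should rely on the inspect.signature branch only. A self-contained sketch:

import inspect

def number_of_args(fct):
    # Counts every declared parameter, including keyword-only ones.
    return len(inspect.signature(fct).parameters)

def example(a, b, c=1):
    return a + b + c

assert number_of_args(example) == 3
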
eventifyio/eventify | eventify/service.py | event_tracker | def event_tracker(func):
"""
Event tracking handler
"""
@wraps(func)
async def wrapper(*args, **kwargs):
"""
Wraps function to provide redis
tracking
"""
event = Event(args[0])
session = kwargs['session']
service_name = session.name
await track_event(event, EventState.started, service_name)
await func(*args, **kwargs)
await track_event(event, EventState.completed, service_name)
return wrapper | python | def event_tracker(func):
"""
Event tracking handler
"""
@wraps(func)
async def wrapper(*args, **kwargs):
"""
Wraps function to provide redis
tracking
"""
event = Event(args[0])
session = kwargs['session']
service_name = session.name
await track_event(event, EventState.started, service_name)
await func(*args, **kwargs)
await track_event(event, EventState.completed, service_name)
return wrapper | [
"def",
"event_tracker",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"async",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Wraps function to provide redis\n tracking\n \"\"\"",
"event",
"=",
"Event",
"(",
"args",
"[",
"0",
"]",
")",
"session",
"=",
"kwargs",
"[",
"'session'",
"]",
"service_name",
"=",
"session",
".",
"name",
"await",
"track_event",
"(",
"event",
",",
"EventState",
".",
"started",
",",
"service_name",
")",
"await",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"await",
"track_event",
"(",
"event",
",",
"EventState",
".",
"completed",
",",
"service_name",
")",
"return",
"wrapper"
]
| Event tracking handler | [
"Event",
"tracking",
"handler"
]
| 0e519964a56bd07a879b266f21f177749c63aaed | https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/service.py#L19-L35 | train |
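
The decorator wraps an async handler so a started/completed state is recorded around the call. A self-contained sketch of the same wrap-and-track shape with the tracking calls stubbed out (the print calls stand in for Event/track_event):

import asyncio
from functools import wraps

def tracked(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        print('started:', args[0])       # stand-in for track_event(..., started, ...)
        result = await func(*args, **kwargs)
        print('completed:', args[0])     # stand-in for track_event(..., completed, ...)
        return result
    return wrapper

@tracked
async def handle(message, session=None):
    return message.upper()

print(asyncio.run(handle('hello')))
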
KnightConan/sspdatatables | src/sspdatatables/utils/decorator.py | ensure_ajax | def ensure_ajax(valid_request_methods, error_response_context=None):
"""
    Intends to ensure the received request is an ajax request and its method
    is included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function
"""
def real_decorator(view_func):
def wrap_func(request, *args, **kwargs):
if not isinstance(request, HttpRequest):
# make sure the request is a django httprequest
return generate_error_json_response("Invalid request!",
error_response_context)
elif not request.is_ajax():
# ensure the request is an ajax request
return generate_error_json_response("Invalid request type!",
error_response_context)
elif request.method not in valid_request_methods:
# check if the request method is in allowed request methods
return generate_error_json_response("Invalid request method!",
error_response_context)
else:
return view_func(request, *args, **kwargs)
wrap_func.__doc__ = view_func.__doc__
wrap_func.__name__ = view_func.__name__
return wrap_func
return real_decorator | python | def ensure_ajax(valid_request_methods, error_response_context=None):
"""
    Intends to ensure the received request is an ajax request and its method
    is included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function
"""
def real_decorator(view_func):
def wrap_func(request, *args, **kwargs):
if not isinstance(request, HttpRequest):
# make sure the request is a django httprequest
return generate_error_json_response("Invalid request!",
error_response_context)
elif not request.is_ajax():
# ensure the request is an ajax request
return generate_error_json_response("Invalid request type!",
error_response_context)
elif request.method not in valid_request_methods:
# check if the request method is in allowed request methods
return generate_error_json_response("Invalid request method!",
error_response_context)
else:
return view_func(request, *args, **kwargs)
wrap_func.__doc__ = view_func.__doc__
wrap_func.__name__ = view_func.__name__
return wrap_func
return real_decorator | [
"def",
"ensure_ajax",
"(",
"valid_request_methods",
",",
"error_response_context",
"=",
"None",
")",
":",
"def",
"real_decorator",
"(",
"view_func",
")",
":",
"def",
"wrap_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"request",
",",
"HttpRequest",
")",
":",
"# make sure the request is a django httprequest",
"return",
"generate_error_json_response",
"(",
"\"Invalid request!\"",
",",
"error_response_context",
")",
"elif",
"not",
"request",
".",
"is_ajax",
"(",
")",
":",
"# ensure the request is an ajax request",
"return",
"generate_error_json_response",
"(",
"\"Invalid request type!\"",
",",
"error_response_context",
")",
"elif",
"request",
".",
"method",
"not",
"in",
"valid_request_methods",
":",
"# check if the request method is in allowed request methods",
"return",
"generate_error_json_response",
"(",
"\"Invalid request method!\"",
",",
"error_response_context",
")",
"else",
":",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrap_func",
".",
"__doc__",
"=",
"view_func",
".",
"__doc__",
"wrap_func",
".",
"__name__",
"=",
"view_func",
".",
"__name__",
"return",
"wrap_func",
"return",
"real_decorator"
]
| Intends to ensure the received request is an ajax request and its method
is included in the valid request methods
:param valid_request_methods: list: list of valid request methods, such as
'GET', 'POST'
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: function | [
"Intends",
"to",
"ensure",
"the",
"received",
"the",
"request",
"is",
"ajax",
"request",
"and",
"it",
"is",
"included",
"in",
"the",
"valid",
"request",
"methods"
]
| 1179a11358734e5e472e5eee703e8d34fa49e9bf | https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/decorator.py#L8-L38 | train |
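
A usage sketch in a Django view module (assumes a configured Django project and that ensure_ajax is importable from this module; the view itself is hypothetical). The decorator rejects non-ajax requests and disallowed methods before the view body runs:

from django.http import JsonResponse

@ensure_ajax(['POST'])
def update_row(request):
    # Reached only for ajax POST requests.
    return JsonResponse({'status': 'ok'})
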
KnightConan/sspdatatables | src/sspdatatables/utils/decorator.py | generate_error_json_response | def generate_error_json_response(error_dict, error_response_context=None):
"""
Intends to build an error json response. If the error_response_context is
    None, then we generate this response using the data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse
"""
response = error_dict
if isinstance(error_dict, str):
response = {"error": response}
if error_response_context is None:
error_response_context = {
'draw': 0, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': []
}
response.update(error_response_context)
return JsonResponse(response) | python | def generate_error_json_response(error_dict, error_response_context=None):
"""
Intends to build an error json response. If the error_response_context is
    None, then we generate this response using the data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse
"""
response = error_dict
if isinstance(error_dict, str):
response = {"error": response}
if error_response_context is None:
error_response_context = {
'draw': 0, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': []
}
response.update(error_response_context)
return JsonResponse(response) | [
"def",
"generate_error_json_response",
"(",
"error_dict",
",",
"error_response_context",
"=",
"None",
")",
":",
"response",
"=",
"error_dict",
"if",
"isinstance",
"(",
"error_dict",
",",
"str",
")",
":",
"response",
"=",
"{",
"\"error\"",
":",
"response",
"}",
"if",
"error_response_context",
"is",
"None",
":",
"error_response_context",
"=",
"{",
"'draw'",
":",
"0",
",",
"'recordsTotal'",
":",
"0",
",",
"'recordsFiltered'",
":",
"0",
",",
"'data'",
":",
"[",
"]",
"}",
"response",
".",
"update",
"(",
"error_response_context",
")",
"return",
"JsonResponse",
"(",
"response",
")"
]
| Intends to build an error json response. If the error_response_context is
None, then we generate this response using the data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse | [
"Intends",
"to",
"build",
"an",
"error",
"json",
"response",
".",
"If",
"the",
"error_response_context",
"is",
"None",
"then",
"we",
"generate",
"this",
"response",
"using",
"data",
"tables",
"format"
]
| 1179a11358734e5e472e5eee703e8d34fa49e9bf | https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/decorator.py#L41-L59 | train |
gofed/gofedlib | gofedlib/go/symbolsextractor/extractor.py | GoSymbolsExtractor._mergeGoSymbols | def _mergeGoSymbols(self, jsons = []):
"""
    Exported symbols for a given package do not have any prefix.
    So I can drop all import paths that are file specific and merge
    all symbols.
    Assuming all files in the given package have mutually exclusive symbols.
"""
# <siXy> imports are per file, exports are per package
# on the highest level we have: pkgname, types, funcs, vars, imports.
symbols = {}
symbols["types"] = []
symbols["funcs"] = []
symbols["vars"] = []
for file_json in jsons:
symbols["types"] += file_json["types"]
symbols["funcs"] += file_json["funcs"]
symbols["vars"] += file_json["vars"]
return symbols | python | def _mergeGoSymbols(self, jsons = []):
"""
    Exported symbols for a given package do not have any prefix.
    So I can drop all import paths that are file specific and merge
    all symbols.
    Assuming all files in the given package have mutually exclusive symbols.
"""
# <siXy> imports are per file, exports are per package
# on the highest level we have: pkgname, types, funcs, vars, imports.
symbols = {}
symbols["types"] = []
symbols["funcs"] = []
symbols["vars"] = []
for file_json in jsons:
symbols["types"] += file_json["types"]
symbols["funcs"] += file_json["funcs"]
symbols["vars"] += file_json["vars"]
return symbols | [
"def",
"_mergeGoSymbols",
"(",
"self",
",",
"jsons",
"=",
"[",
"]",
")",
":",
"# <siXy> imports are per file, exports are per package",
"# on the highest level we have: pkgname, types, funcs, vars, imports.",
"symbols",
"=",
"{",
"}",
"symbols",
"[",
"\"types\"",
"]",
"=",
"[",
"]",
"symbols",
"[",
"\"funcs\"",
"]",
"=",
"[",
"]",
"symbols",
"[",
"\"vars\"",
"]",
"=",
"[",
"]",
"for",
"file_json",
"in",
"jsons",
":",
"symbols",
"[",
"\"types\"",
"]",
"+=",
"file_json",
"[",
"\"types\"",
"]",
"symbols",
"[",
"\"funcs\"",
"]",
"+=",
"file_json",
"[",
"\"funcs\"",
"]",
"symbols",
"[",
"\"vars\"",
"]",
"+=",
"file_json",
"[",
"\"vars\"",
"]",
"return",
"symbols"
]
| Exported symbols for a given package do not have any prefix.
So I can drop all import paths that are file specific and merge
all symbols.
Assuming all files in the given package have mutually exclusive symbols. | [
"Exported",
"symbols",
"for",
"a",
"given",
"package",
"does",
"not",
"have",
"any",
"prefix",
".",
"So",
"I",
"can",
"drop",
"all",
"import",
"paths",
"that",
"are",
"file",
"specific",
"and",
"merge",
"all",
"symbols",
".",
"Assuming",
"all",
"files",
"in",
"the",
"given",
"package",
"has",
"mutual",
"exclusive",
"symbols",
"."
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/symbolsextractor/extractor.py#L203-L222 | train |
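
The merge simply concatenates the three per-file symbol lists. The same behavior fits in one comprehension, with a runnable check:

def merge_go_symbols(jsons):
    # Concatenate each category across all per-file symbol dicts.
    return {
        key: [sym for file_json in jsons for sym in file_json[key]]
        for key in ('types', 'funcs', 'vars')
    }

merged = merge_go_symbols([
    {'types': ['T1'], 'funcs': ['F1'], 'vars': []},
    {'types': ['T2'], 'funcs': [], 'vars': ['V1']},
])
assert merged == {'types': ['T1', 'T2'], 'funcs': ['F1'], 'vars': ['V1']}
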
NikolayDachev/jadm | lib/paramiko-1.14.1/paramiko/_winapi.py | MemoryMap.read | def read(self, n):
"""
Read n bytes from mapped view.
"""
out = ctypes.create_string_buffer(n)
ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
self.pos += n
return out.raw | python | def read(self, n):
"""
Read n bytes from mapped view.
"""
out = ctypes.create_string_buffer(n)
ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
self.pos += n
return out.raw | [
"def",
"read",
"(",
"self",
",",
"n",
")",
":",
"out",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"n",
")",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"RtlMoveMemory",
"(",
"out",
",",
"self",
".",
"view",
"+",
"self",
".",
"pos",
",",
"n",
")",
"self",
".",
"pos",
"+=",
"n",
"return",
"out",
".",
"raw"
]
| Read n bytes from mapped view. | [
"Read",
"n",
"bytes",
"from",
"mapped",
"view",
"."
]
| 12bb550445edfcd87506f7cba7a6a35d413c5511 | https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/_winapi.py#L145-L152 | train |
thiagopbueno/tf-rddlsim | tfrddlsim/simulation/transition_simulator.py | ActionSimulationCell._output | def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
'''Converts `fluents` to tensors with datatype tf.float32.'''
output = []
for _, fluent in fluents:
tensor = fluent.tensor
if tensor.dtype != tf.float32:
tensor = tf.cast(tensor, tf.float32)
output.append(tensor)
return tuple(output) | python | def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
'''Converts `fluents` to tensors with datatype tf.float32.'''
output = []
for _, fluent in fluents:
tensor = fluent.tensor
if tensor.dtype != tf.float32:
tensor = tf.cast(tensor, tf.float32)
output.append(tensor)
return tuple(output) | [
"def",
"_output",
"(",
"cls",
",",
"fluents",
":",
"Sequence",
"[",
"FluentPair",
"]",
")",
"->",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
":",
"output",
"=",
"[",
"]",
"for",
"_",
",",
"fluent",
"in",
"fluents",
":",
"tensor",
"=",
"fluent",
".",
"tensor",
"if",
"tensor",
".",
"dtype",
"!=",
"tf",
".",
"float32",
":",
"tensor",
"=",
"tf",
".",
"cast",
"(",
"tensor",
",",
"tf",
".",
"float32",
")",
"output",
".",
"append",
"(",
"tensor",
")",
"return",
"tuple",
"(",
"output",
")"
]
| Converts `fluents` to tensors with datatype tf.float32. | [
"Converts",
"fluents",
"to",
"tensors",
"with",
"datatype",
"tf",
".",
"float32",
"."
]
| d7102a0ad37d179dbb23141640254ea383d3b43f | https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/transition_simulator.py#L124-L132 | train |
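
The cast-if-needed guard normalizes every fluent tensor to tf.float32 before the tensors are collected into one output tuple. A minimal sketch of the same guard on a single tensor (TensorFlow 2.x eager mode):

import tensorflow as tf

t = tf.constant([1, 0, 1], dtype=tf.int32)
if t.dtype != tf.float32:
    t = tf.cast(t, tf.float32)
print(t)  # tf.Tensor([1. 0. 1.], shape=(3,), dtype=float32)
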
255BITS/hyperchamber | hyperchamber/selector.py | Selector.set | def set(self, key, value):
"""Sets a hyperparameter. Can be used to set an array of hyperparameters."""
self.store[key]=value
return self.store | python | def set(self, key, value):
"""Sets a hyperparameter. Can be used to set an array of hyperparameters."""
self.store[key]=value
return self.store | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"store",
"[",
"key",
"]",
"=",
"value",
"return",
"self",
".",
"store"
]
| Sets a hyperparameter. Can be used to set an array of hyperparameters. | [
"Sets",
"a",
"hyperparameter",
".",
"Can",
"be",
"used",
"to",
"set",
"an",
"array",
"of",
"hyperparameters",
"."
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L29-L32 | train |
255BITS/hyperchamber | hyperchamber/selector.py | Selector.config_at | def config_at(self, i):
"""Gets the ith config"""
selections = {}
for key in self.store:
value = self.store[key]
if isinstance(value, list):
selected = i % len(value)
i = i // len(value)
selections[key]= value[selected]
else:
selections[key]= value
return Config(selections) | python | def config_at(self, i):
"""Gets the ith config"""
selections = {}
for key in self.store:
value = self.store[key]
if isinstance(value, list):
selected = i % len(value)
i = i // len(value)
selections[key]= value[selected]
else:
selections[key]= value
return Config(selections) | [
"def",
"config_at",
"(",
"self",
",",
"i",
")",
":",
"selections",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"store",
":",
"value",
"=",
"self",
".",
"store",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"selected",
"=",
"i",
"%",
"len",
"(",
"value",
")",
"i",
"=",
"i",
"//",
"len",
"(",
"value",
")",
"selections",
"[",
"key",
"]",
"=",
"value",
"[",
"selected",
"]",
"else",
":",
"selections",
"[",
"key",
"]",
"=",
"value",
"return",
"Config",
"(",
"selections",
")"
]
| Gets the ith config | [
"Gets",
"the",
"ith",
"config"
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L77-L89 | train |
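
config_at walks the index through the option lists like the digits of a mixed-radix number, so each integer in [0, product of list lengths) names exactly one combination. A self-contained sketch of the enumeration (returning a plain dict where the method returns a Config):

def config_at(store, i):
    selections = {}
    for key, value in store.items():
        if isinstance(value, list):
            selections[key] = value[i % len(value)]  # pick this "digit"
            i //= len(value)                         # move to the next one
        else:
            selections[key] = value
    return selections

store = {'lr': [0.1, 0.01], 'layers': [2, 3, 4], 'optimizer': 'adam'}
for i in range(2 * 3):  # six distinct configurations
    print(config_at(store, i))
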
255BITS/hyperchamber | hyperchamber/selector.py | Selector.top | def top(self, sort_by):
"""Get the best results according to your custom sort method."""
sort = sorted(self.results, key=sort_by)
return sort | python | def top(self, sort_by):
"""Get the best results according to your custom sort method."""
sort = sorted(self.results, key=sort_by)
return sort | [
"def",
"top",
"(",
"self",
",",
"sort_by",
")",
":",
"sort",
"=",
"sorted",
"(",
"self",
".",
"results",
",",
"key",
"=",
"sort_by",
")",
"return",
"sort"
]
| Get the best results according to your custom sort method. | [
"Get",
"the",
"best",
"results",
"according",
"to",
"your",
"custom",
"sort",
"method",
"."
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L101-L104 | train |
255BITS/hyperchamber | hyperchamber/selector.py | Selector.load_or_create_config | def load_or_create_config(self, filename, config=None):
"""Loads a config from disk. Defaults to a random config if none is specified"""
os.makedirs(os.path.dirname(os.path.expanduser(filename)), exist_ok=True)
if os.path.exists(filename):
return self.load(filename)
if(config == None):
config = self.random_config()
self.save(filename, config)
return config | python | def load_or_create_config(self, filename, config=None):
"""Loads a config from disk. Defaults to a random config if none is specified"""
os.makedirs(os.path.dirname(os.path.expanduser(filename)), exist_ok=True)
if os.path.exists(filename):
return self.load(filename)
if(config == None):
config = self.random_config()
self.save(filename, config)
return config | [
"def",
"load_or_create_config",
"(",
"self",
",",
"filename",
",",
"config",
"=",
"None",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")",
",",
"exist_ok",
"=",
"True",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"self",
".",
"load",
"(",
"filename",
")",
"if",
"(",
"config",
"==",
"None",
")",
":",
"config",
"=",
"self",
".",
"random_config",
"(",
")",
"self",
".",
"save",
"(",
"filename",
",",
"config",
")",
"return",
"config"
]
| Loads a config from disk. Defaults to a random config if none is specified | [
"Loads",
"a",
"config",
"from",
"disk",
".",
"Defaults",
"to",
"a",
"random",
"config",
"if",
"none",
"is",
"specified"
]
| 4d5774bde9ea6ce1113f77a069ffc605148482b8 | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L115-L125 | train |
redhat-openstack/python-tripleo-helper | tripleohelper/undercloud.py | Undercloud.configure | def configure(self, repositories):
"""Prepare the system to be ready for an undercloud installation.
"""
self.enable_repositories(repositories)
self.create_stack_user()
self.install_base_packages()
self.clean_system()
self.yum_update(allow_reboot=True)
self.install_osp()
self.set_selinux('permissive')
self.fix_hostname() | python | def configure(self, repositories):
"""Prepare the system to be ready for an undercloud installation.
"""
self.enable_repositories(repositories)
self.create_stack_user()
self.install_base_packages()
self.clean_system()
self.yum_update(allow_reboot=True)
self.install_osp()
self.set_selinux('permissive')
self.fix_hostname() | [
"def",
"configure",
"(",
"self",
",",
"repositories",
")",
":",
"self",
".",
"enable_repositories",
"(",
"repositories",
")",
"self",
".",
"create_stack_user",
"(",
")",
"self",
".",
"install_base_packages",
"(",
")",
"self",
".",
"clean_system",
"(",
")",
"self",
".",
"yum_update",
"(",
"allow_reboot",
"=",
"True",
")",
"self",
".",
"install_osp",
"(",
")",
"self",
".",
"set_selinux",
"(",
"'permissive'",
")",
"self",
".",
"fix_hostname",
"(",
")"
]
| Prepare the system to be ready for an undercloud installation. | [
"Prepare",
"the",
"system",
"to",
"be",
"ready",
"for",
"an",
"undercloud",
"installation",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L32-L42 | train |
redhat-openstack/python-tripleo-helper | tripleohelper/undercloud.py | Undercloud.openstack_undercloud_install | def openstack_undercloud_install(self):
"""Deploy an undercloud on the host.
"""
instack_undercloud_ver, _ = self.run('repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp')
if instack_undercloud_ver.rstrip('\n') == 'instack-undercloud-0:2.2.0-1.el7ost.noarch':
LOG.warn('Workaround for BZ1298189')
self.run("sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp")
self.run('OS_PASSWORD=bob openstack undercloud install', user='stack')
# NOTE(Gonéri): we also need this after the overcloud deployment
if self.run('rpm -qa openstack-ironic-api')[0].rstrip('\n') == 'openstack-ironic-api-4.2.2-3.el7ost.noarch':
LOG.warn('Workaround for BZ1297796')
self.run('systemctl start openstack-ironic-api.service')
self.add_environment_file(user='stack', filename='stackrc')
self.run('heat stack-list', user='stack') | python | def openstack_undercloud_install(self):
"""Deploy an undercloud on the host.
"""
instack_undercloud_ver, _ = self.run('repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp')
if instack_undercloud_ver.rstrip('\n') == 'instack-undercloud-0:2.2.0-1.el7ost.noarch':
LOG.warn('Workaround for BZ1298189')
self.run("sed -i \"s/.*Keystone_domain\['heat_domain'\].*/Service\['keystone'\] -> Class\['::keystone::roles::admin'\] -> Class\['::heat::keystone::domain'\]/\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp")
self.run('OS_PASSWORD=bob openstack undercloud install', user='stack')
# NOTE(Gonéri): we also need this after the overcloud deployment
if self.run('rpm -qa openstack-ironic-api')[0].rstrip('\n') == 'openstack-ironic-api-4.2.2-3.el7ost.noarch':
LOG.warn('Workaround for BZ1297796')
self.run('systemctl start openstack-ironic-api.service')
self.add_environment_file(user='stack', filename='stackrc')
self.run('heat stack-list', user='stack') | [
"def",
"openstack_undercloud_install",
"(",
"self",
")",
":",
"instack_undercloud_ver",
",",
"_",
"=",
"self",
".",
"run",
"(",
"'repoquery --whatprovides /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp'",
")",
"if",
"instack_undercloud_ver",
".",
"rstrip",
"(",
"'\\n'",
")",
"==",
"'instack-undercloud-0:2.2.0-1.el7ost.noarch'",
":",
"LOG",
".",
"warn",
"(",
"'Workaround for BZ1298189'",
")",
"self",
".",
"run",
"(",
"\"sed -i \\\"s/.*Keystone_domain\\['heat_domain'\\].*/Service\\['keystone'\\] -> Class\\['::keystone::roles::admin'\\] -> Class\\['::heat::keystone::domain'\\]/\\\" /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp\"",
")",
"self",
".",
"run",
"(",
"'OS_PASSWORD=bob openstack undercloud install'",
",",
"user",
"=",
"'stack'",
")",
"# NOTE(Gonéri): we also need this after the overcloud deployment",
"if",
"self",
".",
"run",
"(",
"'rpm -qa openstack-ironic-api'",
")",
"[",
"0",
"]",
".",
"rstrip",
"(",
"'\\n'",
")",
"==",
"'openstack-ironic-api-4.2.2-3.el7ost.noarch'",
":",
"LOG",
".",
"warn",
"(",
"'Workaround for BZ1297796'",
")",
"self",
".",
"run",
"(",
"'systemctl start openstack-ironic-api.service'",
")",
"self",
".",
"add_environment_file",
"(",
"user",
"=",
"'stack'",
",",
"filename",
"=",
"'stackrc'",
")",
"self",
".",
"run",
"(",
"'heat stack-list'",
",",
"user",
"=",
"'stack'",
")"
]
| Deploy an undercloud on the host. | [
"Deploy",
"an",
"undercloud",
"on",
"the",
"host",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L56-L70 | train |
redhat-openstack/python-tripleo-helper | tripleohelper/undercloud.py | Undercloud.create_flavor | def create_flavor(self, name):
"""Create a new baremetal flavor.
:param name: the name of the flavor
"""
self.add_environment_file(user='stack', filename='stackrc')
self.run('openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal', user='stack', success_status=(0, 1))
self.run('openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal', user='stack')
self.run('openstack flavor set --property "capabilities:profile"="baremetal" baremetal', user='stack') | python | def create_flavor(self, name):
"""Create a new baremetal flavor.
:param name: the name of the flavor
"""
self.add_environment_file(user='stack', filename='stackrc')
self.run('openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal', user='stack', success_status=(0, 1))
self.run('openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal', user='stack')
self.run('openstack flavor set --property "capabilities:profile"="baremetal" baremetal', user='stack') | [
"def",
"create_flavor",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"add_environment_file",
"(",
"user",
"=",
"'stack'",
",",
"filename",
"=",
"'stackrc'",
")",
"self",
".",
"run",
"(",
"'openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal'",
",",
"user",
"=",
"'stack'",
",",
"success_status",
"=",
"(",
"0",
",",
"1",
")",
")",
"self",
".",
"run",
"(",
"'openstack flavor set --property \"cpu_arch\"=\"x86_64\" --property \"capabilities:boot_option\"=\"local\" baremetal'",
",",
"user",
"=",
"'stack'",
")",
"self",
".",
"run",
"(",
"'openstack flavor set --property \"capabilities:profile\"=\"baremetal\" baremetal'",
",",
"user",
"=",
"'stack'",
")"
]
| Create a new baremetal flavor.
:param name: the name of the flavor | [
"Create",
"a",
"new",
"baremetal",
"flavor",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L188-L196 | train |
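A short sketch of calling create_flavor, assuming the undercloud instance from the earlier sketch; the name 'baremetal' matches the profile convention used by set_flavor below:

# Registers a 4096 MB RAM / 40 GB disk / 1-vCPU flavor carrying the
# baremetal profile capability (idempotent: exit status 1 is tolerated).
undercloud.create_flavor('baremetal')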
redhat-openstack/python-tripleo-helper | tripleohelper/undercloud.py | Undercloud.list_nodes | def list_nodes(self):
"""List the Ironic nodes UUID."""
self.add_environment_file(user='stack', filename='stackrc')
ret, _ = self.run("ironic node-list --fields uuid|awk '/-.*-/ {print $2}'", user='stack')
# NOTE(Gonéri): the good news is that the order of the nodes is preserved and follows the one from
# the instackenv.json, BUT it may be worth adding a check.
return ret.split() | python | def list_nodes(self):
"""List the Ironic nodes UUID."""
self.add_environment_file(user='stack', filename='stackrc')
ret, _ = self.run("ironic node-list --fields uuid|awk '/-.*-/ {print $2}'", user='stack')
# NOTE(Gonéri): the good news is that the order of the nodes is preserved and follows the one from
# the instackenv.json, BUT it may be worth adding a check.
return ret.split() | [
"def",
"list_nodes",
"(",
"self",
")",
":",
"self",
".",
"add_environment_file",
"(",
"user",
"=",
"'stack'",
",",
"filename",
"=",
"'stackrc'",
")",
"ret",
",",
"_",
"=",
"self",
".",
"run",
"(",
"\"ironic node-list --fields uuid|awk '/-.*-/ {print $2}'\"",
",",
"user",
"=",
"'stack'",
")",
"# NOTE(Gonéri): the good new is, the order of the nodes is preserved and follow the one from",
"# the instackenv.json, BUT it may be interesting to add a check.",
"return",
"ret",
".",
"split",
"(",
")"
]
| List the Ironic nodes UUID. | [
"List",
"the",
"Ironic",
"nodes",
"UUID",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L198-L204 | train |
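A sketch of consuming list_nodes, assuming the same undercloud instance; the method returns the Ironic node UUIDs as a list of strings, in instackenv.json order:

# Each entry is an Ironic node UUID string parsed from 'ironic node-list'.
for uuid in undercloud.list_nodes():
    print(uuid)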
redhat-openstack/python-tripleo-helper | tripleohelper/undercloud.py | Undercloud.set_flavor | def set_flavor(self, node, flavor):
"""Set a flavor to a given ironic node.
:param node: the ironic node
:param flavor: the flavor name
"""
command = (
'ironic node-update {uuid} add '
'properties/capabilities=profile:{flavor},boot_option:local').format(
uuid=node.uuid, flavor=flavor)
node.flavor = flavor
self.add_environment_file(user='stack', filename='stackrc')
self.run(command, user='stack') | python | def set_flavor(self, node, flavor):
"""Set a flavor to a given ironic node.
:param node: the ironic node
:param flavor: the flavor name
"""
command = (
'ironic node-update {uuid} add '
'properties/capabilities=profile:{flavor},boot_option:local').format(
uuid=node.uuid, flavor=flavor)
node.flavor = flavor
self.add_environment_file(user='stack', filename='stackrc')
self.run(command, user='stack') | [
"def",
"set_flavor",
"(",
"self",
",",
"node",
",",
"flavor",
")",
":",
"command",
"=",
"(",
"'ironic node-update {uuid} add '",
"'properties/capabilities=profile:{flavor},boot_option:local'",
")",
".",
"format",
"(",
"uuid",
"=",
"node",
".",
"uuid",
",",
"flavor",
"=",
"flavor",
")",
"node",
".",
"flavor",
"=",
"flavor",
"self",
".",
"add_environment_file",
"(",
"user",
"=",
"'stack'",
",",
"filename",
"=",
"'stackrc'",
")",
"self",
".",
"run",
"(",
"command",
",",
"user",
"=",
"'stack'",
")"
]
| Set a flavor to a given ironic node.
:param node: the ironic node
:param flavor: the flavor name | [
"Set",
"a",
"flavor",
"to",
"a",
"given",
"ironic",
"node",
"."
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/undercloud.py#L206-L219 | train |
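A sketch of set_flavor, assuming the same undercloud instance; note that it expects a node object exposing a uuid attribute (and writes the chosen flavor back onto it), not a bare UUID string. The SimpleNamespace wrapper below is an assumption for illustration only:

from types import SimpleNamespace

# Hypothetical node wrapper; set_flavor reads node.uuid and assigns node.flavor.
node = SimpleNamespace(uuid='<ironic-node-uuid>', flavor=None)
undercloud.set_flavor(node, 'baremetal')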