Dataset schema (one row per function): repo (string, 7–55 chars) | path (string, 4–127 chars) | func_name (string, 1–88 chars) | original_string (string, 75–19.8k chars) | language (1 class: python) | code (string, 75–19.8k chars; identical to original_string in the rows below) | code_tokens (list) | docstring (string, 3–17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87–242 chars) | partition (1 class: train)

vatlab/SoS | src/sos/section_analyzer.py | get_step_output | python

def get_step_output(section, default_output):
    '''determine step output'''
    step_output: sos_targets = sos_targets()
    #
    if 'provides' in section.options and default_output:
        step_output = default_output

    # look for output statement.
    output_idx = find_statement(section, 'output')
    if output_idx is None:
        return step_output

    # output statement
    value = section.statements[output_idx][2]
    # output, depends, and process can be processed multiple times.
    # svars and old_values are captured before the try block so the
    # finally clause can never hit an unbound name.
    svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
    old_values = {
        x: env.sos_dict.dict()[x] for x in svars if x in env.sos_dict.dict()
    }
    try:
        env.sos_dict.quick_update({
            'output_from': no_output_from,
            'named_output': no_named_output,
            'sos_step': no_sos_step,
            'sos_variable': no_sos_variable,
        })
        args, kwargs = SoS_eval(
            f'__null_func__({value})', extra_dict=env.sos_dict.dict())
        if not any(isinstance(x, (dynamic, remote)) for x in args):
            step_output = sos_targets(
                *args, **{
                    x: y
                    for x, y in kwargs.items()
                    if x not in SOS_TARGETS_OPTIONS
                })
    except SyntaxError:
        raise
    except Exception as e:
        if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('STEP', f"Args {value} cannot be determined: {e}")
    finally:
        for x in svars:
            env.sos_dict.dict().pop(x)
        env.sos_dict.quick_update(old_values)

    if 'provides' in section.options and default_output is not None and step_output.valid():
        for out in default_output:
            # 981
            if not isinstance(out, sos_step) and out not in step_output:
                raise ValueError(
                    f'Defined output fails to produce expected output: {step_output} generated, {default_output} expected.'
                )
    return step_output
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L394-L448 | partition: train

vatlab/SoS | src/sos/section_analyzer.py | analyze_section | python

def analyze_section(section: SoS_Step,
                    default_input: Optional[sos_targets] = None,
                    default_output: Optional[sos_targets] = None,
                    context={},
                    vars_and_output_only: bool = False) -> Dict[str, Any]:
    '''Analyze a section for how it uses input and output, what variables
    it uses, and input, output, etc.'''
    # analysis_key = (section.md5, section.step_name(),
    #     default_input.target_name() if hasattr(default_input, 'target_name') else '',
    #     default_output.target_name() if hasattr(default_output, 'target_name') else '',
    #     vars_and_output_only)
    # if analysis_key in analysis_cache:
    #     return analysis_cache[analysis_key]

    # use a fresh env for analysis
    new_env, old_env = env.request_new()
    try:
        prepare_env(section.global_def, section.global_vars, context)
        env.sos_dict.set('step_name', section.step_name())
        env.sos_dict.set('__null_func__', __null_func__)
        if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file(
                'STEP',
                f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}'
            )
        res = {
            'step_name': section.step_name(),
            'step_output': get_step_output(section, default_output),
            # variables starting with __ are internals...
            'environ_vars': get_environ_vars(section),
            'signature_vars': get_signature_vars(section),
            'changed_vars': get_changed_vars(section)
        }
        if not vars_and_output_only:
            inps = get_step_input(section, default_input)
            res['step_input'] = inps[0]
            res['dynamic_input'] = inps[1]
            deps = get_step_depends(section)
            res['step_depends'] = deps[0]
            res['dynamic_depends'] = deps[1]
        # analysis_cache[analysis_key] = res
    finally:
        # restore env
        env.restore_to_old(new_env, old_env)

    # #1225
    # The global section can contain a lot of variables, some of which can be
    # large. Here we find all variables that will be used in the step,
    # including ones used in substeps (signature_vars) and ones that will be
    # used in the input statement etc.
    section.global_vars = {
        x: y
        for x, y in section.global_vars.items()
        if x in get_all_used_vars(section)
    }
    return res
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/section_analyzer.py#L514-L569 | partition: train

vatlab/SoS | src/sos/converter.py | extract_workflow | python

def extract_workflow(notebook):
    '''Extract workflow from a notebook file or notebook JSON instance'''
    if isinstance(notebook, str):
        nb = nbformat.read(notebook, nbformat.NO_CONVERT)
    else:
        nb = notebook
    cells = nb.cells
    content = '#!/usr/bin/env sos-runner\n#fileformat=SOS1.0\n\n'
    for cell in cells:
        if cell.cell_type != "code":
            continue
        # Non-sos code cells are also ignored
        if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos', 'SoS', None):
            continue
        lines = cell.source.split('\n')
        valid_cell = False
        for idx, line in enumerate(lines):
            if valid_cell or (line.startswith('%include') or line.startswith('%from')):
                content += line + '\n'
            elif SOS_SECTION_HEADER.match(line):
                valid_cell = True
                # look retrospectively for comments
                c = idx - 1
                comment = ''
                while c >= 0 and lines[c].startswith('#'):
                    comment = lines[c] + '\n' + comment
                    c -= 1
                content += comment + line + '\n'
        if valid_cell:
            content += '\n'
    return content
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/converter.py#L166-L199 | partition: train

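A minimal usage sketch, assuming SoS is installed and that the function is importable as ``sos.converter.extract_workflow`` (import path inferred from the file path above); the notebook is built in memory with nbformat:

import nbformat
from sos.converter import extract_workflow

nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_code_cell(
    '# a trivial one-step workflow\n[hello]\nprint("hello")'
))
# prints the sos-runner shebang, then the commented [hello] section
print(extract_workflow(nb))
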
vatlab/SoS | misc/vim-ipython/vim_ipython.py | vim_ipython_is_open | python

def vim_ipython_is_open():
    """
    Helper function to let us know if the vim-ipython shell is currently
    visible
    """
    for w in vim.windows:
        if w.buffer.name is not None and w.buffer.name.endswith("vim-ipython"):
            return True
    return False
"""
Helper function to let us know if the vim-ipython shell is currently
visible
"""
for w in vim.windows:
if w.buffer.name is not None and w.buffer.name.endswith("vim-ipython"):
return True
return False | [
"def",
"vim_ipython_is_open",
"(",
")",
":",
"for",
"w",
"in",
"vim",
".",
"windows",
":",
"if",
"w",
".",
"buffer",
".",
"name",
"is",
"not",
"None",
"and",
"w",
".",
"buffer",
".",
"name",
".",
"endswith",
"(",
"\"vim-ipython\"",
")",
":",
"return",
"True",
"return",
"False"
]
| Helper function to let us know if the vim-ipython shell is currently
visible | [
"Helper",
"function",
"to",
"let",
"us",
"know",
"if",
"the",
"vim",
"-",
"ipython",
"shell",
"is",
"currently",
"visible"
]
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L345-L353 | partition: train

vatlab/SoS | misc/vim-ipython/vim_ipython.py | with_subchannel | python

def with_subchannel(f, *args):
    "conditionally monitor subchannel"
    def f_with_update(*args):
        try:
            f(*args)
            if monitor_subchannel:
                update_subchannel_msgs(force=True)
        except AttributeError:  # if kc is None
            echo("not connected to IPython", 'Error')
    return f_with_update
"conditionally monitor subchannel"
def f_with_update(*args):
try:
f(*args)
if monitor_subchannel:
update_subchannel_msgs(force=True)
except AttributeError: #if kc is None
echo("not connected to IPython", 'Error')
return f_with_update | [
"def",
"with_subchannel",
"(",
"f",
",",
"*",
"args",
")",
":",
"def",
"f_with_update",
"(",
"*",
"args",
")",
":",
"try",
":",
"f",
"(",
"*",
"args",
")",
"if",
"monitor_subchannel",
":",
"update_subchannel_msgs",
"(",
"force",
"=",
"True",
")",
"except",
"AttributeError",
":",
"#if kc is None",
"echo",
"(",
"\"not connected to IPython\"",
",",
"'Error'",
")",
"return",
"f_with_update"
]
| conditionally monitor subchannel | [
"conditionally",
"monitor",
"subchannel"
]
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L570-L579 | partition: train

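A hedged sketch of how such a wrapper is applied as a decorator; ``run_this_line`` is an illustrative name, not taken from this file:

@with_subchannel
def run_this_line():
    # send the current vim line to the kernel; after it runs, the wrapper
    # refreshes the vim-ipython buffer whenever monitor_subchannel is set
    send(vim.current.line)
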
vatlab/SoS | misc/vim-ipython/vim_ipython.py | set_pid | python

def set_pid():
    """
    Explicitly ask the ipython kernel for its pid
    """
    global pid
    lines = '\n'.join(['import os', '_pid = os.getpid()'])
    try:
        msg_id = send(lines, silent=True, user_variables=['_pid'])
    except TypeError:  # change in IPython 3.0+
        msg_id = send(lines, silent=True, user_expressions={'_pid': '_pid'})

    # wait to get message back from kernel
    try:
        child = get_child_msg(msg_id)
    except Empty:
        echo("no reply from IPython kernel")
        return

    try:
        pid = int(child['content']['user_variables']['_pid'])
    except TypeError:  # change in IPython 1.0.dev moved this out
        pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError:   # change in IPython 3.0+ moved this to user_expressions
        # a second `except KeyError` on the same try block would be
        # unreachable, so the fallback is nested here instead
        try:
            pid = int(
                child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError:
            echo("Could not get PID information, kernel not running Python?")
    return pid
"""
Explicitly ask the ipython kernel for its pid
"""
global pid
lines = '\n'.join(['import os', '_pid = os.getpid()'])
try:
msg_id = send(lines, silent=True, user_variables=['_pid'])
except TypeError: # change in IPython 3.0+
msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'})
# wait to get message back from kernel
try:
child = get_child_msg(msg_id)
except Empty:
echo("no reply from IPython kernel")
return
try:
pid = int(child['content']['user_variables']['_pid'])
except TypeError: # change in IPython 1.0.dev moved this out
pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 3.0+
pid = int(
child['content']['user_expressions']['_pid']['data']['text/plain'])
except KeyError: # change in IPython 1.0.dev moved this out
echo("Could not get PID information, kernel not running Python?")
return pid | [
"def",
"set_pid",
"(",
")",
":",
"global",
"pid",
"lines",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'import os'",
",",
"'_pid = os.getpid()'",
"]",
")",
"try",
":",
"msg_id",
"=",
"send",
"(",
"lines",
",",
"silent",
"=",
"True",
",",
"user_variables",
"=",
"[",
"'_pid'",
"]",
")",
"except",
"TypeError",
":",
"# change in IPython 3.0+",
"msg_id",
"=",
"send",
"(",
"lines",
",",
"silent",
"=",
"True",
",",
"user_expressions",
"=",
"{",
"'_pid'",
":",
"'_pid'",
"}",
")",
"# wait to get message back from kernel",
"try",
":",
"child",
"=",
"get_child_msg",
"(",
"msg_id",
")",
"except",
"Empty",
":",
"echo",
"(",
"\"no reply from IPython kernel\"",
")",
"return",
"try",
":",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_variables'",
"]",
"[",
"'_pid'",
"]",
")",
"except",
"TypeError",
":",
"# change in IPython 1.0.dev moved this out",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_variables'",
"]",
"[",
"'_pid'",
"]",
"[",
"'data'",
"]",
"[",
"'text/plain'",
"]",
")",
"except",
"KeyError",
":",
"# change in IPython 3.0+",
"pid",
"=",
"int",
"(",
"child",
"[",
"'content'",
"]",
"[",
"'user_expressions'",
"]",
"[",
"'_pid'",
"]",
"[",
"'data'",
"]",
"[",
"'text/plain'",
"]",
")",
"except",
"KeyError",
":",
"# change in IPython 1.0.dev moved this out",
"echo",
"(",
"\"Could not get PID information, kernel not running Python?\"",
")",
"return",
"pid"
]
| Explicitly ask the ipython kernel for its pid | [
"Explicitly",
"ask",
"the",
"ipython",
"kernel",
"for",
"its",
"pid"
]
sha: 6b60ed0770916d135e17322e469520d778e9d4e7 | url: https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L646-L673 | partition: train

BradRuderman/pyhs2 | pyhs2/cursor.py | Cursor.fetchmany | python

def fetchmany(self, size=-1):
    """ return a sequential set of records. This is guaranteed by locking,
    so that no other thread can grab a few records while a set is fetched.
    this has the side effect that other threads may have to wait for
    an arbitrary long time for the completion of the current request.
    """
    self._cursorLock.acquire()
    try:
        # default value (or just checking that someone did not put a ridiculous size)
        if size < 0 or size > self.MAX_BLOCK_SIZE:
            size = self.arraysize
        recs = []
        for _ in range(size):
            recs.append(self.fetchone())
        return recs
    finally:
        # release even if fetchone() raises, so other threads are not blocked forever
        self._cursorLock.release()
""" return a sequential set of records. This is guaranteed by locking,
so that no other thread can grab a few records while a set is fetched.
this has the side effect that other threads may have to wait for
an arbitrary long time for the completion of the current request.
"""
self._cursorLock.acquire()
# default value (or just checking that someone did not put a ridiculous size)
if size < 0 or size > self.MAX_BLOCK_SIZE:
size = self.arraysize
recs = []
for i in range(0,size):
recs.append(self.fetchone())
self._cursorLock.release()
return recs | [
"def",
"fetchmany",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"self",
".",
"_cursorLock",
".",
"acquire",
"(",
")",
"# default value (or just checking that someone did not put a ridiculous size)",
"if",
"size",
"<",
"0",
"or",
"size",
">",
"self",
".",
"MAX_BLOCK_SIZE",
":",
"size",
"=",
"self",
".",
"arraysize",
"recs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"size",
")",
":",
"recs",
".",
"append",
"(",
"self",
".",
"fetchone",
"(",
")",
")",
"self",
".",
"_cursorLock",
".",
"release",
"(",
")",
"return",
"recs"
]
| return a sequential set of records. This is guaranteed by locking,
so that no other thread can grab a few records while a set is fetched.
this has the side effect that other threads may have to wait for
an arbitrary long time for the completion of the current request. | [
"return",
"a",
"sequential",
"set",
"of",
"records",
".",
"This",
"is",
"guaranteed",
"by",
"locking",
"so",
"that",
"no",
"other",
"thread",
"can",
"grab",
"a",
"few",
"records",
"while",
"a",
"set",
"is",
"fetched",
".",
"this",
"has",
"the",
"side",
"effect",
"that",
"other",
"threads",
"may",
"have",
"to",
"wait",
"for",
"an",
"arbitrary",
"long",
"time",
"for",
"the",
"completion",
"of",
"the",
"current",
"request",
"."
]
sha: 1094d4b3a1e9032ee17eeb41f3381bbbd95862c1 | url: https://github.com/BradRuderman/pyhs2/blob/1094d4b3a1e9032ee17eeb41f3381bbbd95862c1/pyhs2/cursor.py#L143-L159 | partition: train

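A minimal usage sketch; the connection parameters and table name are illustrative and assume a reachable HiveServer2 instance (the context-manager style follows the pyhs2 README):

import pyhs2

with pyhs2.connect(host='localhost', port=10000, authMechanism='PLAIN',
                   user='hive', password='', database='default') as conn:
    with conn.cursor() as cur:
        cur.execute('select * from some_table')
        print(cur.fetchmany(10))  # at most 10 rows, fetched under the cursor lock
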
kashifrazzaqui/json-streamer | jsonstreamer/jsonstreamer.py | JSONStreamer.on_number | python

def on_number(self, ctx, value):
    ''' Since this is defined both integer and double callbacks are useless '''
    # treat the literal as an int when it is integral, allowing a leading sign
    # (the original `value.isdigit()` sent negative integers down the float branch)
    value = int(value) if value.lstrip('+-').isdigit() else float(value)
    top = self._stack[-1]
    if top is JSONCompositeType.OBJECT:
        self.fire(JSONStreamer.VALUE_EVENT, value)
    elif top is JSONCompositeType.ARRAY:
        self.fire(JSONStreamer.ELEMENT_EVENT, value)
    else:
        raise RuntimeError('Invalid json-streamer state')
sha: f87527d57557d11682c12727a1a4eeda9cca3c8f | url: https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/jsonstreamer.py#L147-L156 | partition: train

kashifrazzaqui/json-streamer | jsonstreamer/jsonstreamer.py | JSONStreamer.close | python

def close(self):
    """Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl"""
    self.fire(JSONStreamer.DOC_END_EVENT)
    self._stack = None
    self._parser.close()
"""Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl"""
self.fire(JSONStreamer.DOC_END_EVENT)
self._stack = None
self._parser.close() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"fire",
"(",
"JSONStreamer",
".",
"DOC_END_EVENT",
")",
"self",
".",
"_stack",
"=",
"None",
"self",
".",
"_parser",
".",
"close",
"(",
")"
]
| Closes the streamer which causes a `DOC_END_EVENT` to be fired and frees up memory used by yajl | [
"Closes",
"the",
"streamer",
"which",
"causes",
"a",
"DOC_END_EVENT",
"to",
"be",
"fired",
"and",
"frees",
"up",
"memory",
"used",
"by",
"yajl"
]
sha: f87527d57557d11682c12727a1a4eeda9cca3c8f | url: https://github.com/kashifrazzaqui/json-streamer/blob/f87527d57557d11682c12727a1a4eeda9cca3c8f/jsonstreamer/jsonstreamer.py#L190-L194 | partition: train

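A minimal usage sketch, assuming jsonstreamer is installed; the catch-all listener registration follows the project README, and ``close()`` is the method shown above:

from jsonstreamer import JSONStreamer

def on_event(event_name, *args):
    print(event_name, args)

streamer = JSONStreamer()
streamer.add_catch_all_listener(on_event)
streamer.consume('{"n": 1, "xs": [2, 3.5]}')  # numbers go through on_number above
streamer.close()  # fires doc_end and frees the yajl handle
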
paolodragone/pymzn | pymzn/mzn/aio/minizinc.py | minizinc | python

async def minizinc(
        mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
        globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
        keep=False, output_vars=None, output_base=None, output_mode='dict',
        solver=None, timeout=None, two_pass=None, pre_passes=None,
        output_objective=False, non_unique=False, all_solutions=False,
        num_solutions=None, free_search=False, parallel=None, seed=None,
        rebase_arrays=True, keep_solutions=True, return_enums=False,
        max_queue_size=0, **kwargs
):
    """Coroutine version of the ``pymzn.minizinc`` function.

    Parameters
    ----------
    max_queue_size : int
        Maximum number of solutions in the queue between the solution parser
        and the returned solution stream. When the queue is full, the solver
        execution will halt until an item of the queue is consumed. This
        option is useful for memory management in cases where the solution
        stream gets very large and the caller cannot consume solutions as
        fast as they are produced. Use with care: if the full solution stream
        is not consumed before the execution of the Python program ends, it
        may result in the solver becoming a zombie process. Default is ``0``,
        meaning an infinite queue.
    """
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments
        )

    if not solver:
        solver = config.get('solver', gecode)

    solver_args = {**kwargs, **config.get('solver_args', {})}

    proc = await solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments,
        **solver_args
    )

    if output_mode == 'raw':
        solns = asyncio.Queue(maxsize=max_queue_size)
        task = asyncio.create_task(_collect(proc, solns))
    else:
        parser = AsyncSolutionParser(
            solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
            types=types, keep_solutions=keep_solutions,
            return_enums=return_enums, max_queue_size=max_queue_size
        )
        solns = await parser.parse(proc)
        task = parser.parse_task

    if not keep:
        task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))

    return solns
sha: 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | url: https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/aio/minizinc.py#L35-L101 | partition: train

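A minimal consumption sketch, assuming pymzn and a MiniZinc solver are installed and that the coroutine is importable from the module path shown above; ``model.mzn`` is a placeholder. With ``output_mode='raw'`` the returned stream is a plain ``asyncio.Queue``, per the implementation above:

import asyncio

from pymzn.mzn.aio.minizinc import minizinc  # import path inferred from the file path

async def main():
    # raw mode returns an asyncio.Queue of raw solver output
    solns = await minizinc('model.mzn', output_mode='raw', max_queue_size=10)
    chunk = await solns.get()  # blocks until the solver produces something
    print(chunk)

asyncio.run(main())
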
paolodragone/pymzn | pymzn/dzn/parse.py | parse_value | python

def parse_value(val, var_type=None, enums=None, rebase_arrays=True):
    """Parses the value of a dzn statement.

    Parameters
    ----------
    val : str
        A value in dzn format.
    var_type : dict
        The dictionary of variable type as returned by the command ``minizinc
        --model-types-only``. Default is ``None``, in which case the type of
        the variable is inferred from its value. To parse an enum value as a
        Python enum type its ``var_type`` is required, otherwise the value is
        simply returned as a string.
    enums : dict of IntEnum
        A dictionary containing Python enums, with their respective names as
        keys. These enums are available to the parser to convert enum values
        into corresponding values of their respective enum types. Enum values
        can only be parsed if also the ``var_type`` of the variable is
        available.
    rebase_arrays : bool
        If the parsed value is an array and ``rebase_arrays`` is ``True``,
        return it as zero-based lists. If ``rebase_arrays`` is ``False``,
        instead, return it as a dictionary, preserving the original index-set.

    Returns
    -------
    object
        The parsed object. The type of the object depends on the dzn value.
    """
    if not var_type:
        p_val = _parse_array(
            val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False
        )
        if p_val is not None:
            return p_val
        return _parse_val(val, enums=enums)
    if 'dims' in var_type:
        return _parse_array(
            val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums
        )
    return _parse_val(val, var_type=var_type, enums=enums)
"""Parses the value of a dzn statement.
Parameters
----------
val : str
A value in dzn format.
var_type : dict
The dictionary of variable type as returned by the command ``minizinc
--model-types-only``. Default is ``None``, in which case the type of the
variable is inferred from its value. To parse an enum value as a Python
enum type its ``var_type`` is required, otherwise the value is simply
returned as a string.
enums : dict of IntEnum
A dictionary containing Python enums, with their respective names as
keys. These enums are available to the parser to convert enum values
into corresponding values of their respective enum types. Enum values
can only be parsed if also the ``var_type`` of the variable is
available.
rebase_arrays : bool
If the parsed value is an array and ``rebase_arrays`` is ``True``,
return it as zero-based lists. If ``rebase_arrays`` is ``False``,
instead, return it as a dictionary, preserving the original index-set.
Returns
-------
object
The parsed object. The type of the object depends on the dzn value.
"""
if not var_type:
p_val = _parse_array(
val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False
)
if p_val is not None:
return p_val
return _parse_val(val, enums=enums)
if 'dims' in var_type:
return _parse_array(
val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums
)
return _parse_val(val, var_type=var_type, enums=enums) | [
"def",
"parse_value",
"(",
"val",
",",
"var_type",
"=",
"None",
",",
"enums",
"=",
"None",
",",
"rebase_arrays",
"=",
"True",
")",
":",
"if",
"not",
"var_type",
":",
"p_val",
"=",
"_parse_array",
"(",
"val",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"enums",
"=",
"enums",
",",
"raise_errors",
"=",
"False",
")",
"if",
"p_val",
"is",
"not",
"None",
":",
"return",
"p_val",
"return",
"_parse_val",
"(",
"val",
",",
"enums",
"=",
"enums",
")",
"if",
"'dims'",
"in",
"var_type",
":",
"return",
"_parse_array",
"(",
"val",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
")",
"return",
"_parse_val",
"(",
"val",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
")"
]
| Parses the value of a dzn statement.
Parameters
----------
val : str
A value in dzn format.
var_type : dict
The dictionary of variable type as returned by the command ``minizinc
--model-types-only``. Default is ``None``, in which case the type of the
variable is inferred from its value. To parse an enum value as a Python
enum type its ``var_type`` is required, otherwise the value is simply
returned as a string.
enums : dict of IntEnum
A dictionary containing Python enums, with their respective names as
keys. These enums are available to the parser to convert enum values
into corresponding values of their respective enum types. Enum values
can only be parsed if also the ``var_type`` of the variable is
available.
rebase_arrays : bool
If the parsed value is an array and ``rebase_arrays`` is ``True``,
return it as zero-based lists. If ``rebase_arrays`` is ``False``,
instead, return it as a dictionary, preserving the original index-set.
Returns
-------
object
The parsed object. The type of the object depends on the dzn value. | [
"Parses",
"the",
"value",
"of",
"a",
"dzn",
"statement",
"."
]
sha: 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | url: https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/parse.py#L395-L438 | partition: train

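A minimal sketch of type inference without ``var_type``, assuming pymzn is installed and that the function is importable from the module path shown above; the expected results follow the docstring:

from pymzn.dzn.parse import parse_value

print(parse_value('5'))          # -> 5, an int inferred from the literal
print(parse_value('1.5'))        # -> 1.5, a float
print(parse_value('{1, 3}'))     # -> {1, 3}, a Python set
print(parse_value('[1, 2, 3]'))  # -> [1, 2, 3], rebased to a zero-based list
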
paolodragone/pymzn | pymzn/dzn/parse.py | dzn2dict | python

def dzn2dict(dzn, *, rebase_arrays=True, types=None, return_enums=False):
    """Parses a dzn string or file into a dictionary of variable assignments.

    Parameters
    ----------
    dzn : str
        A dzn content string or a path to a dzn file.
    rebase_arrays : bool
        Whether to return arrays as zero-based lists or to return them as
        dictionaries, preserving the original index-sets.
    types : dict
        Dictionary of variable types. Types can either be dictionaries, as
        returned by the command ``minizinc --model-types-only``, or strings
        containing a type in dzn format. If the type is a string, it can
        either be the name of an enum type or one of the following: ``bool``,
        ``int``, ``float``, ``enum``, ``set of <type>``,
        ``array[<index_sets>] of <type>``. The default value for
        ``var_types`` is ``None``, in which case the type of most dzn
        assignments will be inferred automatically from the value. Enum
        values can only be parsed if their respective types are available.
    return_enums : bool
        Whether to return the parsed enum types included in the dzn content.

    Returns
    -------
    dict
        A dictionary containing the variable assignments parsed from the
        input file or string.
    """
    dzn_ext = os.path.splitext(dzn)[1]
    if dzn_ext == '.dzn':
        with open(dzn) as f:
            dzn = f.read()

    var_types = None
    if types:
        var_types = {}
        for var, var_type in types.items():
            if isinstance(var_type, str):
                var_types[var] = _to_var_type(var, var_type)
            elif isinstance(var_type, dict):
                var_types[var] = var_type
            else:
                err = 'Type of variable {} must be a string or a dict.'
                raise ValueError(err.format(var))

    enum_types = None
    if var_types:
        enum_types = []
        for var, var_type in var_types.items():
            if 'enum_type' in var_type and var_type['enum_type'] == var:
                enum_types.append(var)

    var_list = []
    dzn = _comm_p.sub('\n', dzn)
    stmts = _stmt_p.findall(dzn)
    for stmt in stmts:
        var_m = _var_p.match(stmt)
        if var_m:
            var = var_m.group('var')
            val = var_m.group('val')
            var_list.append((var, val))
        else:
            raise ValueError(
                'Unsupported parsing for statement:\n{}'.format(repr(stmt))
            )

    enums = None
    if enum_types:
        enums = {}
        remaining = []
        while len(var_list) > 0:
            var, val = var_list.pop(0)
            if var in enum_types:
                enum = None
                enum_m = _enum_p.match(val)
                if enum_m:
                    vals = enum_m.group('vals').strip()
                    if vals:
                        enum_vals = _parse_enum_vals(vals.split(','))
                        enum = IntEnum(
                            var, {v: i + 1 for i, v in enumerate(enum_vals)}
                        )
                if enum is None:
                    raise ValueError(
                        'Cannot parse enum type \'{} = {}\'.'.format(var, val)
                    )
                enums[var] = enum
            else:
                remaining.append((var, val))
        var_list = remaining

    assign = {}
    for var, val in var_list:
        var_type = None
        if var_types:
            var_type = var_types.get(var, None)
        assign[var] = parse_value(
            val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays
        )

    if return_enums and enums:
        assign.update(enums)

    return assign
"""Parses a dzn string or file into a dictionary of variable assignments.
Parameters
----------
dzn : str
A dzn content string or a path to a dzn file.
rebase_arrays : bool
Whether to return arrays as zero-based lists or to return them as
dictionaries, preserving the original index-sets.
types : dict
Dictionary of variable types. Types can either be dictionaries, as
returned by the ``minizinc --model-types-only``, or strings containing a
type in dzn format. If the type is a string, it can either be the name
of an enum type or one of the following: ``bool``, ``int``, ``float``,
``enum``, ``set of <type>``, ``array[<index_sets>] of <type>``. The
default value for ``var_types`` is ``None``, in which case the type of
most dzn assignments will be inferred automatically from the value. Enum
values can only be parsed if their respective types are available.
return_enums : bool
Whether to return the parsed enum types included in the dzn content.
Returns
-------
dict
A dictionary containing the variable assignments parsed from the
input file or string.
"""
dzn_ext = os.path.splitext(dzn)[1]
if dzn_ext == '.dzn':
with open(dzn) as f:
dzn = f.read()
var_types = None
if types:
var_types = {}
for var, var_type in types.items():
if isinstance(var_type, str):
var_types[var] = _to_var_type(var, var_type)
elif isinstance(var_type, dict):
var_types[var] = var_type
else:
err = 'Type of variable {} must be a string or a dict.'
raise ValueError(err.format(var))
enum_types = None
if var_types:
enum_types = []
for var, var_type in var_types.items():
if 'enum_type' in var_type and var_type['enum_type'] == var:
enum_types.append(var)
var_list = []
dzn = _comm_p.sub('\n', dzn)
stmts = _stmt_p.findall(dzn)
for stmt in stmts:
var_m = _var_p.match(stmt)
if var_m:
var = var_m.group('var')
val = var_m.group('val')
var_list.append((var, val))
else:
raise ValueError(
'Unsupported parsing for statement:\n{}'.format(repr(stmt))
)
enums = None
if enum_types:
enums = {}
remaining = []
while len(var_list) > 0:
var, val = var_list.pop(0)
if var in enum_types:
enum = None
enum_m = _enum_p.match(val)
if enum_m:
vals = enum_m.group('vals').strip()
if vals:
enum_vals = _parse_enum_vals(vals.split(','))
enum = IntEnum(
var, {v: i + 1 for i, v in enumerate(enum_vals)}
)
if enum is None:
raise ValueError(
'Cannot parse enum type \'{} = {}\'.'.format(var, val)
)
enums[var] = enum
else:
remaining.append((var, val))
var_list = remaining
assign = {}
for var, val in var_list:
var_type = None
if var_types:
var_type = var_types.get(var, None)
assign[var] = parse_value(
val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays
)
if return_enums and enums:
assign.update(enums)
return assign | [
"def",
"dzn2dict",
"(",
"dzn",
",",
"*",
",",
"rebase_arrays",
"=",
"True",
",",
"types",
"=",
"None",
",",
"return_enums",
"=",
"False",
")",
":",
"dzn_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"dzn",
")",
"[",
"1",
"]",
"if",
"dzn_ext",
"==",
"'.dzn'",
":",
"with",
"open",
"(",
"dzn",
")",
"as",
"f",
":",
"dzn",
"=",
"f",
".",
"read",
"(",
")",
"var_types",
"=",
"None",
"if",
"types",
":",
"var_types",
"=",
"{",
"}",
"for",
"var",
",",
"var_type",
"in",
"types",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"var_type",
",",
"str",
")",
":",
"var_types",
"[",
"var",
"]",
"=",
"_to_var_type",
"(",
"var",
",",
"var_type",
")",
"elif",
"isinstance",
"(",
"var_type",
",",
"dict",
")",
":",
"var_types",
"[",
"var",
"]",
"=",
"var_type",
"else",
":",
"err",
"=",
"'Type of variable {} must be a string or a dict.'",
"raise",
"ValueError",
"(",
"err",
".",
"format",
"(",
"var",
")",
")",
"enum_types",
"=",
"None",
"if",
"var_types",
":",
"enum_types",
"=",
"[",
"]",
"for",
"var",
",",
"var_type",
"in",
"var_types",
".",
"items",
"(",
")",
":",
"if",
"'enum_type'",
"in",
"var_type",
"and",
"var_type",
"[",
"'enum_type'",
"]",
"==",
"var",
":",
"enum_types",
".",
"append",
"(",
"var",
")",
"var_list",
"=",
"[",
"]",
"dzn",
"=",
"_comm_p",
".",
"sub",
"(",
"'\\n'",
",",
"dzn",
")",
"stmts",
"=",
"_stmt_p",
".",
"findall",
"(",
"dzn",
")",
"for",
"stmt",
"in",
"stmts",
":",
"var_m",
"=",
"_var_p",
".",
"match",
"(",
"stmt",
")",
"if",
"var_m",
":",
"var",
"=",
"var_m",
".",
"group",
"(",
"'var'",
")",
"val",
"=",
"var_m",
".",
"group",
"(",
"'val'",
")",
"var_list",
".",
"append",
"(",
"(",
"var",
",",
"val",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported parsing for statement:\\n{}'",
".",
"format",
"(",
"repr",
"(",
"stmt",
")",
")",
")",
"enums",
"=",
"None",
"if",
"enum_types",
":",
"enums",
"=",
"{",
"}",
"remaining",
"=",
"[",
"]",
"while",
"len",
"(",
"var_list",
")",
">",
"0",
":",
"var",
",",
"val",
"=",
"var_list",
".",
"pop",
"(",
"0",
")",
"if",
"var",
"in",
"enum_types",
":",
"enum",
"=",
"None",
"enum_m",
"=",
"_enum_p",
".",
"match",
"(",
"val",
")",
"if",
"enum_m",
":",
"vals",
"=",
"enum_m",
".",
"group",
"(",
"'vals'",
")",
".",
"strip",
"(",
")",
"if",
"vals",
":",
"enum_vals",
"=",
"_parse_enum_vals",
"(",
"vals",
".",
"split",
"(",
"','",
")",
")",
"enum",
"=",
"IntEnum",
"(",
"var",
",",
"{",
"v",
":",
"i",
"+",
"1",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"enum_vals",
")",
"}",
")",
"if",
"enum",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot parse enum type \\'{} = {}\\'.'",
".",
"format",
"(",
"var",
",",
"val",
")",
")",
"enums",
"[",
"var",
"]",
"=",
"enum",
"else",
":",
"remaining",
".",
"append",
"(",
"(",
"var",
",",
"val",
")",
")",
"var_list",
"=",
"remaining",
"assign",
"=",
"{",
"}",
"for",
"var",
",",
"val",
"in",
"var_list",
":",
"var_type",
"=",
"None",
"if",
"var_types",
":",
"var_type",
"=",
"var_types",
".",
"get",
"(",
"var",
",",
"None",
")",
"assign",
"[",
"var",
"]",
"=",
"parse_value",
"(",
"val",
",",
"var_type",
"=",
"var_type",
",",
"enums",
"=",
"enums",
",",
"rebase_arrays",
"=",
"rebase_arrays",
")",
"if",
"return_enums",
"and",
"enums",
":",
"assign",
".",
"update",
"(",
"enums",
")",
"return",
"assign"
]
| Parses a dzn string or file into a dictionary of variable assignments.
Parameters
----------
dzn : str
A dzn content string or a path to a dzn file.
rebase_arrays : bool
Whether to return arrays as zero-based lists or to return them as
dictionaries, preserving the original index-sets.
types : dict
Dictionary of variable types. Types can either be dictionaries, as
returned by the ``minizinc --model-types-only``, or strings containing a
type in dzn format. If the type is a string, it can either be the name
of an enum type or one of the following: ``bool``, ``int``, ``float``,
``enum``, ``set of <type>``, ``array[<index_sets>] of <type>``. The
default value for ``var_types`` is ``None``, in which case the type of
most dzn assignments will be inferred automatically from the value. Enum
values can only be parsed if their respective types are available.
return_enums : bool
Whether to return the parsed enum types included in the dzn content.
Returns
-------
dict
A dictionary containing the variable assignments parsed from the
input file or string. | [
"Parses",
"a",
"dzn",
"string",
"or",
"file",
"into",
"a",
"dictionary",
"of",
"variable",
"assignments",
"."
]
sha: 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | url: https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/parse.py#L490-L593 | partition: train

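A minimal sketch, assuming pymzn is installed and exports ``dzn2dict`` at the package level (otherwise import it from ``pymzn.dzn.parse`` as above):

from pymzn import dzn2dict

assign = dzn2dict('x = 3; v = [1, 2, 3]; s = {1, 3};')
print(assign)  # expected: {'x': 3, 'v': [1, 2, 3], 's': {1, 3}}
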
paolodragone/pymzn | pymzn/mzn/solvers.py | Solver.args | python

def args(
        self, all_solutions=False, num_solutions=None, free_search=False,
        parallel=None, seed=None, **kwargs
):
    """Returns a list of command line arguments for the specified options.

    If the solver parser is able to parse statistics, this function should
    always add options to display statistics.

    Parameters
    ----------
    all_solutions : bool
        Whether all the solutions must be returned (default is False).
    num_solutions : int
        The maximum number of solutions to be returned (only used in
        satisfaction problems).
    free_search : bool
        Whether the solver should be instructed to perform a free search.
    parallel : int
        The number of parallel threads the solver should use.
    seed : int
        The random number generator seed to pass to the solver.
    """
    args = ['-s', '-v']
    if all_solutions:
        args.append('-a')
    if num_solutions is not None:
        args += ['-n', num_solutions]
    if free_search:
        args.append('-f')
    if parallel is not None:
        args += ['-p', parallel]
    if seed is not None:
        args += ['-r', seed]
    return args
self, all_solutions=False, num_solutions=None, free_search=False,
parallel=None, seed=None, **kwargs
):
"""Returns a list of command line arguments for the specified options.
If the solver parser is able to parse statistics, this function should
always add options to display statistics.
Parameters
----------
all_solutions : bool
Whether all the solutions must be returned (default is False).
num_solutions : int
The maximum number of solutions to be returned (only used in
satisfation problems).
free_search : bool
Whether the solver should be instructed to perform a free search.
parallel : int
The number of parallel threads the solver should use.
seed : int
The random number generator seed to pass to the solver.
"""
args = ['-s', '-v']
if all_solutions:
args.append('-a')
if num_solutions is not None:
args += ['-n', num_solutions]
if free_search:
args.append('-f')
if parallel is not None:
args += ['-p', parallel]
if seed is not None:
args += ['-r', seed]
return args | [
"def",
"args",
"(",
"self",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"[",
"'-s'",
",",
"'-v'",
"]",
"if",
"all_solutions",
":",
"args",
".",
"append",
"(",
"'-a'",
")",
"if",
"num_solutions",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-n'",
",",
"num_solutions",
"]",
"if",
"free_search",
":",
"args",
".",
"append",
"(",
"'-f'",
")",
"if",
"parallel",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-p'",
",",
"parallel",
"]",
"if",
"seed",
"is",
"not",
"None",
":",
"args",
"+=",
"[",
"'-r'",
",",
"seed",
"]",
"return",
"args"
]
| Returns a list of command line arguments for the specified options.
If the solver parser is able to parse statistics, this function should
always add options to display statistics.
Parameters
----------
all_solutions : bool
Whether all the solutions must be returned (default is False).
num_solutions : int
The maximum number of solutions to be returned (only used in
satisfaction problems).
free_search : bool
Whether the solver should be instructed to perform a free search.
parallel : int
The number of parallel threads the solver should use.
seed : int
The random number generator seed to pass to the solver. | [
"Returns",
"a",
"list",
"of",
"command",
"line",
"arguments",
"for",
"the",
"specified",
"options",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/solvers.py#L96-L130 | train |
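A short sketch of how ``args`` composes solver flags, assuming the bundled ``pymzn.gecode`` solver instance is available; concrete ``Solver`` subclasses may append flags of their own.

```python
import pymzn

# Build the flag list for an all-solutions run on 4 threads with a
# fixed seed; '-s' and '-v' are always added to request statistics.
flags = pymzn.gecode.args(all_solutions=True, parallel=4, seed=42)
print(flags)  # ['-s', '-v', '-a', '-p', 4, '-r', 42] for the base class
```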
paolodragone/pymzn | pymzn/log.py | debug | def debug(dbg=True):
"""Enables or disables debugging messages on the standard output."""
global _debug_handler
if dbg and _debug_handler is None:
_debug_handler = logging.StreamHandler()
logger.addHandler(_debug_handler)
logger.setLevel(logging.DEBUG)
elif not dbg and _debug_handler is not None:
logger.removeHandler(_debug_handler)
_debug_handler = None
logger.setLevel(logging.WARNING) | python | def debug(dbg=True):
"""Enables or disables debugging messages on the standard output."""
global _debug_handler
if dbg and _debug_handler is None:
_debug_handler = logging.StreamHandler()
logger.addHandler(_debug_handler)
logger.setLevel(logging.DEBUG)
elif not dbg and _debug_handler is not None:
logger.removeHandler(_debug_handler)
_debug_handler = None
logger.setLevel(logging.WARNING) | [
"def",
"debug",
"(",
"dbg",
"=",
"True",
")",
":",
"global",
"_debug_handler",
"if",
"dbg",
"and",
"_debug_handler",
"is",
"None",
":",
"_debug_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"logger",
".",
"addHandler",
"(",
"_debug_handler",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"elif",
"not",
"dbg",
"and",
"_debug_handler",
"is",
"not",
"None",
":",
"logger",
".",
"removeHandler",
"(",
"_debug_handler",
")",
"_debug_handler",
"=",
"None",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")"
]
| Enables or disables debugging messages on the standard error. | [
"Enables",
"or",
"disables",
"debugging",
"messages",
"on",
"the",
"standard",
"output",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/log.py#L15-L25 | train |
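Typical toggling of the debug handler defined above:

```python
import pymzn

pymzn.debug()       # attach a StreamHandler and switch to DEBUG level
# ... run pymzn calls whose internal logging you want to inspect ...
pymzn.debug(False)  # detach the handler and fall back to WARNING level
```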
paolodragone/pymzn | pymzn/mzn/minizinc.py | minizinc_version | def minizinc_version():
"""Returns the version of the found minizinc executable."""
vs = _run_minizinc('--version')
    m = re.findall(r'version ([\d\.]+)', vs)
if not m:
raise RuntimeError('MiniZinc executable not found.')
return m[0] | python | def minizinc_version():
"""Returns the version of the found minizinc executable."""
vs = _run_minizinc('--version')
    m = re.findall(r'version ([\d\.]+)', vs)
if not m:
raise RuntimeError('MiniZinc executable not found.')
return m[0] | [
"def",
"minizinc_version",
"(",
")",
":",
"vs",
"=",
"_run_minizinc",
"(",
"'--version'",
")",
"m",
"=",
"re",
".",
"findall",
"(",
"'version ([\\d\\.]+)'",
",",
"vs",
")",
"if",
"not",
"m",
":",
"raise",
"RuntimeError",
"(",
"'MiniZinc executable not found.'",
")",
"return",
"m",
"[",
"0",
"]"
]
| Returns the version of the found minizinc executable. | [
"Returns",
"the",
"version",
"of",
"the",
"found",
"minizinc",
"executable",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L64-L70 | train |
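A quick check of the detected MiniZinc toolchain, importing directly from the module shown in the record above:

```python
from pymzn.mzn.minizinc import minizinc_version

# Raises RuntimeError if no 'version <x.y.z>' string can be extracted
# from the output of 'minizinc --version'.
print(minizinc_version())  # e.g. '2.2.3'
```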
paolodragone/pymzn | pymzn/mzn/minizinc.py | preprocess_model | def preprocess_model(model, rewrap=True, **kwargs):
"""Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model.
"""
args = {**kwargs, **config.get('args', {})}
model = _process_template(model, **args)
if rewrap:
model = rewrap_model(model)
return model | python | def preprocess_model(model, rewrap=True, **kwargs):
"""Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model.
"""
args = {**kwargs, **config.get('args', {})}
model = _process_template(model, **args)
if rewrap:
model = rewrap_model(model)
return model | [
"def",
"preprocess_model",
"(",
"model",
",",
"rewrap",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'args'",
",",
"{",
"}",
")",
"}",
"model",
"=",
"_process_template",
"(",
"model",
",",
"*",
"*",
"args",
")",
"if",
"rewrap",
":",
"model",
"=",
"rewrap_model",
"(",
"model",
")",
"return",
"model"
]
| Preprocess a MiniZinc model.
This function takes care of preprocessing the model by resolving the
template using the arguments passed as keyword arguments to this function.
Optionally, this function can also "rewrap" the model, deleting spaces at
the beginning of the lines while preserving indentation.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
rewrap : bool
Whether to "rewrap" the model, i.e. to delete leading spaces, while
preserving indentation. Default is ``True``.
**kwargs
Additional arguments to pass to the template engine.
Returns
-------
str
The preprocessed model. | [
"Preprocess",
"a",
"MiniZinc",
"model",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L179-L209 | train |
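A sketch of template resolution, assuming PyMzn's template engine uses Jinja2-style ``{{ ... }}`` placeholders; the model text and the ``n`` argument are hypothetical.

```python
from pymzn.mzn.minizinc import preprocess_model

model = '''
    int: n = {{ n }};
    var 1..n: x;
    solve satisfy;
'''
# Resolves the template with n=5; rewrap=True (the default) strips the
# common leading whitespace while preserving relative indentation.
print(preprocess_model(model, n=5))
```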
paolodragone/pymzn | pymzn/mzn/minizinc.py | save_model | def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
"""Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory in which to create the file when ``output_file`` is ``None``.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file.
"""
if output_file:
mzn_file = output_file
output_file = open(output_file, 'w+', buffering=1)
else:
output_prefix += '_'
output_file = NamedTemporaryFile(
dir=output_dir, prefix=output_prefix, suffix='.mzn', delete=False,
mode='w+', buffering=1
)
mzn_file = output_file.name
output_file.write(model)
output_file.close()
logger.info('Generated file {}'.format(mzn_file))
return mzn_file | python | def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
"""Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory in which to create the file when ``output_file`` is ``None``.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file.
"""
if output_file:
mzn_file = output_file
output_file = open(output_file, 'w+', buffering=1)
else:
output_prefix += '_'
output_file = NamedTemporaryFile(
dir=output_dir, prefix=output_prefix, suffix='.mzn', delete=False,
mode='w+', buffering=1
)
mzn_file = output_file.name
output_file.write(model)
output_file.close()
logger.info('Generated file {}'.format(mzn_file))
return mzn_file | [
"def",
"save_model",
"(",
"model",
",",
"output_file",
"=",
"None",
",",
"output_dir",
"=",
"None",
",",
"output_prefix",
"=",
"'pymzn'",
")",
":",
"if",
"output_file",
":",
"mzn_file",
"=",
"output_file",
"output_file",
"=",
"open",
"(",
"output_file",
",",
"'w+'",
",",
"buffering",
"=",
"1",
")",
"else",
":",
"output_prefix",
"+=",
"'_'",
"output_file",
"=",
"NamedTemporaryFile",
"(",
"dir",
"=",
"output_dir",
",",
"prefix",
"=",
"output_prefix",
",",
"suffix",
"=",
"'.mzn'",
",",
"delete",
"=",
"False",
",",
"mode",
"=",
"'w+'",
",",
"buffering",
"=",
"1",
")",
"mzn_file",
"=",
"output_file",
".",
"name",
"output_file",
".",
"write",
"(",
"model",
")",
"output_file",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Generated file {}'",
".",
"format",
"(",
"mzn_file",
")",
")",
"return",
"mzn_file"
]
| Save a model to file.
Parameters
----------
model : str
The minizinc model (i.e. the content of a ``.mzn`` file).
output_file : str
The path to the output file. If this parameter is ``None`` (default), a
temporary file is created with the given model in the specified output
directory, using the specified prefix.
output_dir : str
The directory in which to create the file when ``output_file`` is ``None``.
Default is ``None``, which creates a file in the system temporary directory.
output_prefix : str
The prefix for the output file if created. Default is ``'pymzn'``.
Returns
-------
str
The path to the newly created ``.mzn`` file. | [
"Save",
"a",
"model",
"to",
"file",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L212-L249 | train |
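Persisting a model string with the function above; with no ``output_file`` a temporary ``pymzn_*.mzn`` file is created and its path returned.

```python
from pymzn.mzn.minizinc import save_model

path = save_model('var 1..3: x;\nsolve satisfy;')
print(path)  # e.g. '/tmp/pymzn_xxxxxx.mzn' (name is generated)
```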
paolodragone/pymzn | pymzn/mzn/minizinc.py | check_instance | def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) | python | def check_instance(
mzn, *dzn_files, data=None, include=None, stdlib_dir=None, globals_dir=None,
allow_multiple_assignments=False
):
"""Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails.
"""
args = ['--instance-check-only']
args += _flattening_args(
mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
globals_dir=globals_dir,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) | [
"def",
"check_instance",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"allow_multiple_assignments",
"=",
"False",
")",
":",
"args",
"=",
"[",
"'--instance-check-only'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"if",
"proc",
".",
"stderr_data",
":",
"raise",
"MiniZincError",
"(",
"mzn",
"if",
"input",
"is",
"None",
"else",
"'\\n'",
"+",
"mzn",
"+",
"'\\n'",
",",
"args",
",",
"proc",
".",
"stderr_data",
")"
]
| Perform instance checking on a model + data.
This function calls the command ``minizinc --instance-check-only`` to check
for consistency of the given model + data.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
Raises
------
``MiniZincError`` if instance checking fails. | [
"Perform",
"instance",
"checking",
"on",
"a",
"model",
"+",
"data",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L327-L378 | train |
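A sketch of instance checking on an inline model, with data passed as dzn assignment strings; the model and data are hypothetical, and ``MiniZincError`` is imported from the module where it is raised.

```python
from pymzn.mzn.minizinc import check_instance, MiniZincError

model = 'int: n; var 1..n: x; solve satisfy;'
try:
    check_instance(model, data=['n = 3;'])
    print('instance is consistent')
except MiniZincError as err:
    print('instance check failed:', err)
```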
paolodragone/pymzn | pymzn/mzn/minizinc.py | check_model | def check_model(
mzn, *, include=None, stdlib_dir=None, globals_dir=None
):
"""Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails.
"""
args = ['--model-check-only']
args += _flattening_args(
mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) | python | def check_model(
mzn, *, include=None, stdlib_dir=None, globals_dir=None
):
"""Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails.
"""
args = ['--model-check-only']
args += _flattening_args(
mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir
)
input = mzn if args[-1] == '-' else None
proc = _run_minizinc_proc(*args, input=input)
if proc.stderr_data:
raise MiniZincError(
mzn if input is None else '\n' + mzn + '\n', args, proc.stderr_data
) | [
"def",
"check_model",
"(",
"mzn",
",",
"*",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
")",
":",
"args",
"=",
"[",
"'--model-check-only'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"if",
"proc",
".",
"stderr_data",
":",
"raise",
"MiniZincError",
"(",
"mzn",
"if",
"input",
"is",
"None",
"else",
"'\\n'",
"+",
"mzn",
"+",
"'\\n'",
",",
"args",
",",
"proc",
".",
"stderr_data",
")"
]
| Perform model checking on a given model.
This function calls the command ``minizinc --model-check-only`` to check
for consistency of the given model.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
Raises
------
``MiniZincError`` if model checking fails. | [
"Perform",
"model",
"checking",
"on",
"a",
"given",
"model",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L381-L419 | train |
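The data-free counterpart, checking only that the model itself is well-formed:

```python
from pymzn.mzn.minizinc import check_model, MiniZincError

try:
    check_model('var 1..3: x; constraint x > 1; solve satisfy;')
    print('model is well-formed')
except MiniZincError as err:
    print('model check failed:', err)
```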
paolodragone/pymzn | pymzn/mzn/minizinc.py | minizinc | def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs
):
"""Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn or json respectively. The ``'raw'`` format, instead,
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Whether to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if not keep:
_cleanup([mzn_file, data_file])
if output_mode == 'raw':
return proc.stdout_data
parser = SolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions, return_enums=return_enums
)
solns = parser.parse(proc)
return solns | python | def minizinc(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='dict',
solver=None, timeout=None, two_pass=None, pre_passes=None,
output_objective=False, non_unique=False, all_solutions=False,
num_solutions=None, free_search=False, parallel=None, seed=None,
rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs
):
"""Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn or json respectively. The ``'raw'`` format, instead,
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Whether to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
if not solver:
solver = config.get('solver', gecode)
solver_args = {**kwargs, **config.get('solver_args', {})}
proc = solve(
solver, mzn_file, *dzn_files, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
pre_passes=pre_passes, output_objective=output_objective,
non_unique=non_unique, all_solutions=all_solutions,
num_solutions=num_solutions, free_search=free_search, parallel=parallel,
seed=seed, allow_multiple_assignments=allow_multiple_assignments,
**solver_args
)
if not keep:
_cleanup([mzn_file, data_file])
if output_mode == 'raw':
return proc.stdout_data
parser = SolutionParser(
solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
types=types, keep_solutions=keep_solutions, return_enums=return_enums
)
solns = parser.parse(proc)
return solns | [
"def",
"minizinc",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"declare_enums",
"=",
"True",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"output_vars",
"=",
"None",
",",
"output_base",
"=",
"None",
",",
"output_mode",
"=",
"'dict'",
",",
"solver",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"two_pass",
"=",
"None",
",",
"pre_passes",
"=",
"None",
",",
"output_objective",
"=",
"False",
",",
"non_unique",
"=",
"False",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"rebase_arrays",
"=",
"True",
",",
"keep_solutions",
"=",
"True",
",",
"return_enums",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"mzn_file",
",",
"dzn_files",
",",
"data_file",
",",
"data",
",",
"keep",
",",
"_output_mode",
",",
"types",
"=",
"_minizinc_preliminaries",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_vars",
"=",
"output_vars",
",",
"keep",
"=",
"keep",
",",
"output_base",
"=",
"output_base",
",",
"output_mode",
"=",
"output_mode",
",",
"declare_enums",
"=",
"declare_enums",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"if",
"not",
"solver",
":",
"solver",
"=",
"config",
".",
"get",
"(",
"'solver'",
",",
"gecode",
")",
"solver_args",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'solver_args'",
",",
"{",
"}",
")",
"}",
"proc",
"=",
"solve",
"(",
"solver",
",",
"mzn_file",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"_output_mode",
",",
"timeout",
"=",
"timeout",
",",
"two_pass",
"=",
"two_pass",
",",
"pre_passes",
"=",
"pre_passes",
",",
"output_objective",
"=",
"output_objective",
",",
"non_unique",
"=",
"non_unique",
",",
"all_solutions",
"=",
"all_solutions",
",",
"num_solutions",
"=",
"num_solutions",
",",
"free_search",
"=",
"free_search",
",",
"parallel",
"=",
"parallel",
",",
"seed",
"=",
"seed",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
",",
"*",
"*",
"solver_args",
")",
"if",
"not",
"keep",
":",
"_cleanup",
"(",
"[",
"mzn_file",
",",
"data_file",
"]",
")",
"if",
"output_mode",
"==",
"'raw'",
":",
"return",
"proc",
".",
"stdout_data",
"parser",
"=",
"SolutionParser",
"(",
"solver",
",",
"output_mode",
"=",
"output_mode",
",",
"rebase_arrays",
"=",
"rebase_arrays",
",",
"types",
"=",
"types",
",",
"keep_solutions",
"=",
"keep_solutions",
",",
"return_enums",
"=",
"return_enums",
")",
"solns",
"=",
"parser",
".",
"parse",
"(",
"proc",
")",
"return",
"solns"
]
| Implements the workflow for solving a CSP problem encoded with MiniZinc.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default is ``'dict'`` which returns a
stream of solutions decoded as python dictionaries. The ``'item'``
format outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dzn'`` and ``'json'`` formats output a stream of strings
formatted in dzn or json respectively. The ``'raw'`` format, instead,
returns the whole solution stream, without parsing.
solver : Solver
The ``Solver`` instance to use. The default solver is ``gecode``.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If ``True``, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
rebase_arrays : bool
Whether to "rebase" parsed arrays (see the `Dzn files
<http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is
True.
keep_solutions : bool
Whether to store the solutions in memory after solving is done. If
``keep_solutions`` is ``False``, the returned solution stream can only
be iterated once and cannot be addressed as a list.
return_enums : bool
Whether to return enum types along with the variable assignments in the
solutions. Only used if ``output_mode='dict'``. Default is ``False``.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Solutions or str
If ``output_mode`` is not ``'raw'``, returns a list-like object
containing the solutions found by the solver. The format of the solution
depends on the specified ``output_mode``. If ``keep_solutions=False``,
the returned object cannot be addressed as a list and can only be
iterated once. If ``output_mode='raw'``, the function returns the whole
solution stream as a single string. | [
"Implements",
"the",
"workflow",
"for",
"solving",
"a",
"CSP",
"problem",
"encoded",
"with",
"MiniZinc",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L502-L658 | train |
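An end-to-end sketch of the high-level entry point above: inline model, data as a dict, all solutions decoded to Python dictionaries (the default ``output_mode='dict'``); the model is hypothetical.

```python
import pymzn

model = 'int: n; var 1..n: x; solve satisfy;'
solns = pymzn.minizinc(model, data={'n': 3}, all_solutions=True)
for soln in solns:
    print(soln)  # {'x': 1}, {'x': 2}, {'x': 3}, in solver order
```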
paolodragone/pymzn | pymzn/mzn/minizinc.py | solve | def solve(
solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
globals_dir=None, allow_multiple_assignments=False, output_mode='item',
timeout=None, two_pass=None, pre_passes=None, output_objective=False,
non_unique=False, all_solutions=False, num_solutions=None,
free_search=False, parallel=None, seed=None, **kwargs
):
"""Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
"""
args = _solve_args(
solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
output_objective=output_objective, non_unique=non_unique,
all_solutions=all_solutions, num_solutions=num_solutions,
free_search=free_search, parallel=parallel, seed=seed, **kwargs
)
args += _flattening_args(
mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
t0 = _time()
try:
proc = _run_minizinc_proc(*args, input=input)
except RuntimeError as err:
raise MiniZincError(mzn, args) from err
solve_time = _time() - t0
logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
return proc | python | def solve(
solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
globals_dir=None, allow_multiple_assignments=False, output_mode='item',
timeout=None, two_pass=None, pre_passes=None, output_objective=False,
non_unique=False, all_solutions=False, num_solutions=None,
free_search=False, parallel=None, seed=None, **kwargs
):
"""Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process.
"""
args = _solve_args(
solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
output_objective=output_objective, non_unique=non_unique,
all_solutions=all_solutions, num_solutions=num_solutions,
free_search=free_search, parallel=parallel, seed=seed, **kwargs
)
args += _flattening_args(
mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
allow_multiple_assignments=allow_multiple_assignments
)
input = mzn if args[-1] == '-' else None
t0 = _time()
try:
proc = _run_minizinc_proc(*args, input=input)
except RuntimeError as err:
raise MiniZincError(mzn, args) from err
solve_time = _time() - t0
logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))
return proc | [
"def",
"solve",
"(",
"solver",
",",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"output_mode",
"=",
"'item'",
",",
"timeout",
"=",
"None",
",",
"two_pass",
"=",
"None",
",",
"pre_passes",
"=",
"None",
",",
"output_objective",
"=",
"False",
",",
"non_unique",
"=",
"False",
",",
"all_solutions",
"=",
"False",
",",
"num_solutions",
"=",
"None",
",",
"free_search",
"=",
"False",
",",
"parallel",
"=",
"None",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"_solve_args",
"(",
"solver",
",",
"timeout",
"=",
"timeout",
",",
"two_pass",
"=",
"two_pass",
",",
"pre_passes",
"=",
"pre_passes",
",",
"output_objective",
"=",
"output_objective",
",",
"non_unique",
"=",
"non_unique",
",",
"all_solutions",
"=",
"all_solutions",
",",
"num_solutions",
"=",
"num_solutions",
",",
"free_search",
"=",
"free_search",
",",
"parallel",
"=",
"parallel",
",",
"seed",
"=",
"seed",
",",
"*",
"*",
"kwargs",
")",
"args",
"+=",
"_flattening_args",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"output_mode",
",",
"include",
"=",
"include",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"input",
"=",
"mzn",
"if",
"args",
"[",
"-",
"1",
"]",
"==",
"'-'",
"else",
"None",
"t0",
"=",
"_time",
"(",
")",
"try",
":",
"proc",
"=",
"_run_minizinc_proc",
"(",
"*",
"args",
",",
"input",
"=",
"input",
")",
"except",
"RuntimeError",
"as",
"err",
":",
"raise",
"MiniZincError",
"(",
"mzn_file",
",",
"args",
")",
"from",
"err",
"solve_time",
"=",
"_time",
"(",
")",
"-",
"t0",
"logger",
".",
"info",
"(",
"'Solving completed in {:>3.2f} sec'",
".",
"format",
"(",
"solve_time",
")",
")",
"return",
"proc"
]
| Flatten and solve a MiniZinc program.
Parameters
----------
solver : Solver
The ``Solver`` instance to use.
mzn : str
The path to the minizinc model file.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
data : list of str
Additional data as a list of strings containing dzn variable
assignments.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
output_mode : {'item', 'dzn', 'json'}
The desired output format. The default is ``'item'`` which outputs a
stream of strings as returned by the ``solns2out`` tool, formatted
according to the output statement of the MiniZinc model. The ``'dzn'``
and ``'json'`` formats output a stream of strings formatted in dzn and
json respectively.
timeout : int
The timeout in seconds for the flattening + solving process.
two_pass : bool or int
If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
option for the ``minizinc`` executable. If ``two_pass`` is an integer
``<n>``, instead, it is equivalent to the ``-O<n>`` option for the
``minizinc`` executable.
pre_passes : int
Equivalent to the ``--pre-passes`` option for the ``minizinc``
executable.
output_objective : bool
Equivalent to the ``--output-objective`` option for the ``minizinc``
executable. Adds a field ``_objective`` to all solutions.
non_unique : bool
Equivalent to the ``--non-unique`` option for the ``minizinc``
executable.
all_solutions : bool
Whether all the solutions must be returned. This option might not work
if the solver does not support it. Default is ``False``.
num_solutions : int
The upper bound on the number of solutions to be returned. This option
might not work if the solver does not support it. Default is ``1``.
free_search : bool
If True, instruct the solver to perform free search.
parallel : int
The number of parallel threads the solver can utilize for solving.
seed : int
The random number generator seed to pass to the solver.
**kwargs
Additional arguments to pass to the solver, provided as additional
keyword arguments to this function. Check the solver documentation for
the available arguments.
Returns
-------
Object wrapping the executed process. | [
"Flatten",
"and",
"solve",
"a",
"MiniZinc",
"program",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L694-L796 | train |
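A lower-level sketch using ``solve`` directly with an explicit solver; 'model.mzn' is a hypothetical file path, and ``stdout_data`` is the attribute of the wrapped process that ``minizinc`` itself reads when ``output_mode='raw'``.

```python
import pymzn
from pymzn.mzn.minizinc import solve

proc = solve(pymzn.gecode, 'model.mzn', data=['n = 3;'],
             all_solutions=True)
print(proc.stdout_data)  # raw, unparsed solution stream
```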
paolodragone/pymzn | pymzn/mzn/minizinc.py | mzn2fzn | def mzn2fzn(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='item',
no_ozn=False
):
"""Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default for this function is ``'item'``,
which outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dict'`` format returns a stream of solutions decoded as python
dictionaries. The ``'dzn'`` and ``'json'`` formats output streams of
strings formatted in dzn and json respectively. The ``'raw'`` format
instead returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
args = ['--compile']
args += _flattening_args(
mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
no_ozn=no_ozn, output_base=output_base,
allow_multiple_assignments=allow_multiple_assignments
)
t0 = _time()
_run_minizinc(*args)
flattening_time = _time() - t0
logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
if not keep:
with contextlib.suppress(FileNotFoundError):
if data_file:
os.remove(data_file)
logger.info('Deleted file: {}'.format(data_file))
if output_base:
mzn_base = output_base
else:
mzn_base = os.path.splitext(mzn_file)[0]
fzn_file = '.'.join([mzn_base, 'fzn'])
fzn_file = fzn_file if os.path.isfile(fzn_file) else None
ozn_file = '.'.join([mzn_base, 'ozn'])
ozn_file = ozn_file if os.path.isfile(ozn_file) else None
if fzn_file:
logger.info('Generated file: {}'.format(fzn_file))
if ozn_file:
logger.info('Generated file: {}'.format(ozn_file))
return fzn_file, ozn_file | python | def mzn2fzn(
mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
keep=False, output_vars=None, output_base=None, output_mode='item',
no_ozn=False
):
"""Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default for this function is ``'item'``,
which outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dict'`` format returns a stream of solutions decoded as python
dictionaries. The ``'dzn'`` and ``'json'`` formats output streams of
strings formatted in dzn and json respectively. The ``'raw'`` format
instead returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``.
"""
mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
_minizinc_preliminaries(
mzn, *dzn_files, args=args, data=data, include=include,
stdlib_dir=stdlib_dir, globals_dir=globals_dir,
output_vars=output_vars, keep=keep, output_base=output_base,
output_mode=output_mode, declare_enums=declare_enums,
allow_multiple_assignments=allow_multiple_assignments
)
args = ['--compile']
args += _flattening_args(
mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir,
globals_dir=globals_dir, output_mode=output_mode, include=include,
no_ozn=no_ozn, output_base=output_base,
allow_multiple_assignments=allow_multiple_assignments
)
t0 = _time()
_run_minizinc(*args)
flattening_time = _time() - t0
logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
if not keep:
with contextlib.suppress(FileNotFoundError):
if data_file:
os.remove(data_file)
logger.info('Deleted file: {}'.format(data_file))
if output_base:
mzn_base = output_base
else:
mzn_base = os.path.splitext(mzn_file)[0]
fzn_file = '.'.join([mzn_base, 'fzn'])
fzn_file = fzn_file if os.path.isfile(fzn_file) else None
ozn_file = '.'.join([mzn_base, 'ozn'])
ozn_file = ozn_file if os.path.isfile(ozn_file) else None
if fzn_file:
logger.info('Generated file: {}'.format(fzn_file))
if ozn_file:
logger.info('Generated file: {}'.format(ozn_file))
return fzn_file, ozn_file | [
"def",
"mzn2fzn",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"include",
"=",
"None",
",",
"stdlib_dir",
"=",
"None",
",",
"globals_dir",
"=",
"None",
",",
"declare_enums",
"=",
"True",
",",
"allow_multiple_assignments",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"output_vars",
"=",
"None",
",",
"output_base",
"=",
"None",
",",
"output_mode",
"=",
"'item'",
",",
"no_ozn",
"=",
"False",
")",
":",
"mzn_file",
",",
"dzn_files",
",",
"data_file",
",",
"data",
",",
"keep",
",",
"_output_mode",
",",
"types",
"=",
"_minizinc_preliminaries",
"(",
"mzn",
",",
"*",
"dzn_files",
",",
"args",
"=",
"args",
",",
"data",
"=",
"data",
",",
"include",
"=",
"include",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_vars",
"=",
"output_vars",
",",
"keep",
"=",
"keep",
",",
"output_base",
"=",
"output_base",
",",
"output_mode",
"=",
"output_mode",
",",
"declare_enums",
"=",
"declare_enums",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"args",
"=",
"[",
"'--compile'",
"]",
"args",
"+=",
"_flattening_args",
"(",
"mzn_file",
",",
"*",
"dzn_files",
",",
"data",
"=",
"data",
",",
"stdlib_dir",
"=",
"stdlib_dir",
",",
"globals_dir",
"=",
"globals_dir",
",",
"output_mode",
"=",
"output_mode",
",",
"include",
"=",
"include",
",",
"no_ozn",
"=",
"no_ozn",
",",
"output_base",
"=",
"output_base",
",",
"allow_multiple_assignments",
"=",
"allow_multiple_assignments",
")",
"t0",
"=",
"_time",
"(",
")",
"_run_minizinc",
"(",
"*",
"args",
")",
"flattening_time",
"=",
"_time",
"(",
")",
"-",
"t0",
"logger",
".",
"info",
"(",
"'Flattening completed in {:>3.2f} sec'",
".",
"format",
"(",
"flattening_time",
")",
")",
"if",
"not",
"keep",
":",
"with",
"contextlib",
".",
"suppress",
"(",
"FileNotFoundError",
")",
":",
"if",
"data_file",
":",
"os",
".",
"remove",
"(",
"data_file",
")",
"logger",
".",
"info",
"(",
"'Deleted file: {}'",
".",
"format",
"(",
"data_file",
")",
")",
"if",
"output_base",
":",
"mzn_base",
"=",
"output_base",
"else",
":",
"mzn_base",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"mzn_file",
")",
"[",
"0",
"]",
"fzn_file",
"=",
"'.'",
".",
"join",
"(",
"[",
"mzn_base",
",",
"'fzn'",
"]",
")",
"fzn_file",
"=",
"fzn_file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fzn_file",
")",
"else",
"None",
"ozn_file",
"=",
"'.'",
".",
"join",
"(",
"[",
"mzn_base",
",",
"'ozn'",
"]",
")",
"ozn_file",
"=",
"ozn_file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"ozn_file",
")",
"else",
"None",
"if",
"fzn_file",
":",
"logger",
".",
"info",
"(",
"'Generated file: {}'",
".",
"format",
"(",
"fzn_file",
")",
")",
"if",
"ozn_file",
":",
"logger",
".",
"info",
"(",
"'Generated file: {}'",
".",
"format",
"(",
"ozn_file",
")",
")",
"return",
"fzn_file",
",",
"ozn_file"
]
| Flatten a MiniZinc model into a FlatZinc one.
This function is equivalent to the command ``minizinc --compile``.
Parameters
----------
mzn : str
The minizinc model. This can be either the path to the ``.mzn`` file or
the content of the model itself.
*dzn_files
A list of paths to dzn files to attach to the minizinc execution,
provided as positional arguments; by default no data file is attached.
args : dict
Arguments for the template engine.
data : dict
Additional data as a dictionary of variable assignments to supply to
the minizinc executable. The dictionary is automatically converted to
dzn format by the ``pymzn.dict2dzn`` function.
include : str or list
One or more additional paths to search for included ``.mzn`` files.
stdlib_dir : str
The path to the MiniZinc standard library. Provide it only if it is
different from the default one.
globals_dir : str
The path to the MiniZinc globals directory. Provide it only if it is
different from the default one.
declare_enums : bool
Whether to declare enum types when converting inline data into dzn
format. If the enum types are declared elsewhere this option should be
False. Default is ``True``.
allow_multiple_assignments : bool
Whether to allow multiple assignments of variables. Sometimes it is
convenient to simply let the data file override the value already
assigned in the minizinc file. Default is ``False``.
keep : bool
Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
files or not. If False, the generated files are created as temporary
files which will be deleted right after the problem is solved. Though
files generated by PyMzn are not intended to be kept, this option can
be used for debugging purposes. Note that in case of error the files are
not deleted even if this parameter is ``False``. Default is ``False``.
output_vars : list of str
A list of output variables. These variables will be the ones included in
the output dictionary. Only available if ``output_mode='dict'``.
output_base : str
Output directory for the files generated by PyMzn. The default
(``None``) is the temporary directory of your OS (if ``keep=False``) or
the current working directory (if ``keep=True``).
output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
The desired output format. The default for this function is ``'item'``,
which outputs a stream of strings as returned by the ``solns2out``
tool, formatted according to the output statement of the MiniZinc model.
The ``'dict'`` format returns a stream of solutions decoded as python
dictionaries. The ``'dzn'`` and ``'json'`` formats output streams of
strings formatted in dzn and json respectively. The ``'raw'`` format
instead returns the whole solution stream, without parsing.
no_ozn : bool
If ``True``, the ozn file is not produced, ``False`` otherwise.
Returns
-------
tuple (str, str)
The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
second argument is ``None``. | [
"Flatten",
"a",
"MiniZinc",
"model",
"into",
"a",
"FlatZinc",
"one",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L799-L914 | train |
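A minimal sketch of flattening without solving, assuming the function is exposed as ``pymzn.mzn2fzn`` and that ``model.mzn`` and ``data.dzn`` are hypothetical files on disk.

import pymzn

# With keep=True and no output_base, the .fzn/.ozn files typically land
# next to the input model instead of in deleted temporary files.
fzn_file, ozn_file = pymzn.mzn2fzn('model.mzn', 'data.dzn', keep=True)
print(fzn_file)  # e.g. 'model.fzn'
print(ozn_file)  # e.g. 'model.ozn', or None when no_ozn=True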
paolodragone/pymzn | pymzn/mzn/output.py | Solutions.print | def print(self, output_file=sys.stdout, log=False):
"""Print the solution stream"""
for soln in iter(self):
print(soln, file=output_file)
print(SOLN_SEP, file=output_file)
if self.status == 0:
print(SEARCH_COMPLETE, file=output_file)
if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
print({
Status.INCOMPLETE : ERROR,
Status.UNKNOWN: UNKNOWN,
Status.UNSATISFIABLE: UNSATISFIABLE,
Status.UNBOUNDED: UNBOUNDED,
Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
Status.ERROR: ERROR
}[self.status], file=output_file)
if self.stderr:
print(self.stderr.strip(), file=sys.stderr)
elif log:
print(str(self.log), file=output_file) | python | def print(self, output_file=sys.stdout, log=False):
"""Print the solution stream"""
for soln in iter(self):
print(soln, file=output_file)
print(SOLN_SEP, file=output_file)
if self.status == 0:
print(SEARCH_COMPLETE, file=output_file)
if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
print({
Status.INCOMPLETE : ERROR,
Status.UNKNOWN: UNKNOWN,
Status.UNSATISFIABLE: UNSATISFIABLE,
Status.UNBOUNDED: UNBOUNDED,
Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
Status.ERROR: ERROR
}[self.status], file=output_file)
if self.stderr:
print(self.stderr.strip(), file=sys.stderr)
elif log:
print(str(self.log), file=output_file) | [
"def",
"print",
"(",
"self",
",",
"output_file",
"=",
"sys",
".",
"stdout",
",",
"log",
"=",
"False",
")",
":",
"for",
"soln",
"in",
"iter",
"(",
"self",
")",
":",
"print",
"(",
"soln",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"SOLN_SEP",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"status",
"==",
"0",
":",
"print",
"(",
"SEARCH_COMPLETE",
",",
"file",
"=",
"output_file",
")",
"if",
"(",
"self",
".",
"status",
"==",
"1",
"and",
"self",
".",
"_n_solns",
"==",
"0",
")",
"or",
"self",
".",
"status",
">=",
"2",
":",
"print",
"(",
"{",
"Status",
".",
"INCOMPLETE",
":",
"ERROR",
",",
"Status",
".",
"UNKNOWN",
":",
"UNKNOWN",
",",
"Status",
".",
"UNSATISFIABLE",
":",
"UNSATISFIABLE",
",",
"Status",
".",
"UNBOUNDED",
":",
"UNBOUNDED",
",",
"Status",
".",
"UNSATorUNBOUNDED",
":",
"UNSATorUNBOUNDED",
",",
"Status",
".",
"ERROR",
":",
"ERROR",
"}",
"[",
"self",
".",
"status",
"]",
",",
"file",
"=",
"output_file",
")",
"if",
"self",
".",
"stderr",
":",
"print",
"(",
"self",
".",
"stderr",
".",
"strip",
"(",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"elif",
"log",
":",
"print",
"(",
"str",
"(",
"self",
".",
"log",
")",
",",
"file",
"=",
"output_file",
")"
]
| Print the solution stream | [
"Print",
"the",
"solution",
"stream"
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/output.py#L143-L167 | train |
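The printing logic above mirrors the ``solns2out`` stream format: each solution block is followed by a separator, then a final status marker. A hedged sketch, assuming ``pymzn.minizinc`` returns one of these ``Solutions`` objects and ``model.mzn`` is a hypothetical file:

import sys
import pymzn

solns = pymzn.minizinc('model.mzn')
# Re-emit the stream: one block per solution, a separator line after
# each, then a search-complete or UNSAT/UNKNOWN/ERROR marker.
solns.print(output_file=sys.stdout)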
paolodragone/pymzn | pymzn/config.py | Config.dump | def dump(self):
"""Writes the changes to the configuration file."""
try:
import yaml
cfg_file = self._cfg_file()
cfg_dir, __ = os.path.split(cfg_file)
os.makedirs(cfg_dir, exist_ok=True)
with open(cfg_file, 'w') as f:
yaml.dump(self, f)
except ImportError as err:
raise RuntimeError(
'Cannot dump the configuration settings to file. You need to '
'install the necessary dependencies (pyyaml, appdirs).'
) from err | python | def dump(self):
"""Writes the changes to the configuration file."""
try:
import yaml
cfg_file = self._cfg_file()
cfg_dir, __ = os.path.split(cfg_file)
os.makedirs(cfg_dir, exist_ok=True)
with open(cfg_file, 'w') as f:
yaml.dump(self, f)
except ImportError as err:
raise RuntimeError(
'Cannot dump the configuration settings to file. You need to '
'install the necessary dependencies (pyyaml, appdirs).'
) from err | [
"def",
"dump",
"(",
"self",
")",
":",
"try",
":",
"import",
"yaml",
"cfg_file",
"=",
"self",
".",
"_cfg_file",
"(",
")",
"cfg_dir",
",",
"__",
"=",
"os",
".",
"path",
".",
"split",
"(",
"cfg_file",
")",
"os",
".",
"makedirs",
"(",
"cfg_dir",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"cfg_file",
",",
"'w'",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"self",
",",
"f",
")",
"except",
"ImportError",
"as",
"err",
":",
"raise",
"RuntimeError",
"(",
"'Cannot dump the configuration settings to file. You need to '",
"'install the necessary dependencies (pyyaml, appdirs).'",
")",
"from",
"err"
]
| Writes the changes to the configuration file. | [
"Writes",
"the",
"changes",
"to",
"the",
"configuration",
"file",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/config.py#L102-L115 | train |
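A hedged sketch of persisting settings; it assumes a module-level ``Config`` instance is available as ``pymzn.config`` and that it supports item assignment, neither of which is shown in this row.

import pymzn

# Both the attribute name and the key are assumptions for illustration.
pymzn.config['solver'] = 'gecode'
pymzn.config.dump()  # requires pyyaml and appdirs to write the file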
paolodragone/pymzn | pymzn/mzn/templates.py | discretize | def discretize(value, factor=100):
"""Discretize the given value, pre-multiplying by the given factor"""
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value | python | def discretize(value, factor=100):
"""Discretize the given value, pre-multiplying by the given factor"""
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value | [
"def",
"discretize",
"(",
"value",
",",
"factor",
"=",
"100",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"Iterable",
")",
":",
"return",
"int",
"(",
"value",
"*",
"factor",
")",
"int_value",
"=",
"list",
"(",
"deepcopy",
"(",
"value",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"int_value",
")",
")",
":",
"int_value",
"[",
"i",
"]",
"=",
"int",
"(",
"int_value",
"[",
"i",
"]",
"*",
"factor",
")",
"return",
"int_value"
]
| Discretize the given value, pre-multiplying by the given factor | [
"Discretize",
"the",
"given",
"value",
"pre",
"-",
"multiplying",
"by",
"the",
"given",
"factor"
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L87-L94 | train |
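The behaviour is easy to check at the REPL; a sketch, taking the import path from this row: scalars and iterables of floats are mapped to ints after multiplying by ``factor``.

from pymzn.mzn.templates import discretize

print(discretize(0.5))               # 50, since the default factor is 100
print(discretize([0.1, 0.25], 10))   # [1, 2]: each element scaled, then truncated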
paolodragone/pymzn | pymzn/mzn/templates.py | from_string | def from_string(source, args=None):
"""Renders a template string"""
if _has_jinja:
logger.info('Precompiling model with arguments: {}'.format(args))
return _jenv.from_string(source).render(args or {})
if args:
raise RuntimeError(_except_text)
return source | python | def from_string(source, args=None):
"""Renders a template string"""
if _has_jinja:
logger.info('Precompiling model with arguments: {}'.format(args))
return _jenv.from_string(source).render(args or {})
if args:
raise RuntimeError(_except_text)
return source | [
"def",
"from_string",
"(",
"source",
",",
"args",
"=",
"None",
")",
":",
"if",
"_has_jinja",
":",
"logger",
".",
"info",
"(",
"'Precompiling model with arguments: {}'",
".",
"format",
"(",
"args",
")",
")",
"return",
"_jenv",
".",
"from_string",
"(",
"source",
")",
".",
"render",
"(",
"args",
"or",
"{",
"}",
")",
"if",
"args",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"return",
"source"
]
| Renders a template string | [
"Renders",
"a",
"template",
"string"
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L146-L153 | train |
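A sketch of rendering a templated model fragment; this path requires Jinja2, since without it any non-empty ``args`` raises ``RuntimeError``.

from pymzn.mzn.templates import from_string

src = 'int: n = {{ n }};'
print(from_string(src, {'n': 3}))  # -> 'int: n = 3;'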
paolodragone/pymzn | pymzn/mzn/templates.py | add_package | def add_package(package_name, package_path='templates', encoding='utf-8'):
"""Adds the given package to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(PackageLoader(package_name, package_path, encoding)) | python | def add_package(package_name, package_path='templates', encoding='utf-8'):
"""Adds the given package to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(PackageLoader(package_name, package_path, encoding)) | [
"def",
"add_package",
"(",
"package_name",
",",
"package_path",
"=",
"'templates'",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"not",
"_has_jinja",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"_jload",
".",
"add_loader",
"(",
"PackageLoader",
"(",
"package_name",
",",
"package_path",
",",
"encoding",
")",
")"
]
| Adds the given package to the template search routine | [
"Adds",
"the",
"given",
"package",
"to",
"the",
"template",
"search",
"routine"
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L155-L159 | train |
paolodragone/pymzn | pymzn/mzn/templates.py | add_path | def add_path(searchpath, encoding='utf-8', followlinks=False):
"""Adds the given path to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(FileSystemLoader(searchpath, encoding, followlinks)) | python | def add_path(searchpath, encoding='utf-8', followlinks=False):
"""Adds the given path to the template search routine"""
if not _has_jinja:
raise RuntimeError(_except_text)
_jload.add_loader(FileSystemLoader(searchpath, encoding, followlinks)) | [
"def",
"add_path",
"(",
"searchpath",
",",
"encoding",
"=",
"'utf-8'",
",",
"followlinks",
"=",
"False",
")",
":",
"if",
"not",
"_has_jinja",
":",
"raise",
"RuntimeError",
"(",
"_except_text",
")",
"_jload",
".",
"add_loader",
"(",
"FileSystemLoader",
"(",
"searchpath",
",",
"encoding",
",",
"followlinks",
")",
")"
]
| Adds the given path to the template search routine | [
"Adds",
"the",
"given",
"path",
"to",
"the",
"template",
"search",
"routine"
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/templates.py#L162-L166 | train |
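Both loaders above extend where templates are resolved from; a sketch with a hypothetical directory:

from pymzn.mzn.templates import add_path

# Files under ./mzn_templates become reachable from {% include %} and
# the other template-loading machinery.
add_path('./mzn_templates')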
paolodragone/pymzn | pymzn/dzn/marsh.py | val2dzn | def val2dzn(val, wrap=True):
"""Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value.
"""
if _is_value(val):
dzn_val = _dzn_val(val)
elif _is_set(val):
dzn_val = _dzn_set(val)
elif _is_array_type(val):
dzn_val = _dzn_array_nd(val)
else:
raise TypeError(
'Unsupported serialization of value: {}'.format(repr(val))
)
if wrap:
wrapper = _get_wrapper()
dzn_val = wrapper.fill(dzn_val)
return dzn_val | python | def val2dzn(val, wrap=True):
"""Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value.
"""
if _is_value(val):
dzn_val = _dzn_val(val)
elif _is_set(val):
dzn_val = _dzn_set(val)
elif _is_array_type(val):
dzn_val = _dzn_array_nd(val)
else:
raise TypeError(
'Unsupported serialization of value: {}'.format(repr(val))
)
if wrap:
wrapper = _get_wrapper()
dzn_val = wrapper.fill(dzn_val)
return dzn_val | [
"def",
"val2dzn",
"(",
"val",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"_is_value",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_val",
"(",
"val",
")",
"elif",
"_is_set",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_set",
"(",
"val",
")",
"elif",
"_is_array_type",
"(",
"val",
")",
":",
"dzn_val",
"=",
"_dzn_array_nd",
"(",
"val",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unsupported serialization of value: {}'",
".",
"format",
"(",
"repr",
"(",
"val",
")",
")",
")",
"if",
"wrap",
":",
"wrapper",
"=",
"_get_wrapper",
"(",
")",
"dzn_val",
"=",
"wrapper",
".",
"fill",
"(",
"dzn_val",
")",
"return",
"dzn_val"
]
| Serializes a value into its dzn representation.
The supported types are ``bool``, ``int``, ``float``, ``set``, ``array``.
Parameters
----------
val
The value to serialize
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the given value. | [
"Serializes",
"a",
"value",
"into",
"its",
"dzn",
"representation",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L215-L247 | train |
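A sketch of the value-level serializer, taking the import path from this row; the exact rendering of sets and arrays may differ slightly from the comments.

from pymzn.dzn.marsh import val2dzn

print(val2dzn(3))                    # '3'
print(val2dzn({1, 2, 3}))            # a dzn set, e.g. '1..3' or '{1, 2, 3}'
print(val2dzn([1, 2], wrap=False))   # a dzn array literal, unwrapped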
paolodragone/pymzn | pymzn/dzn/marsh.py | stmt2dzn | def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
"""Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
val_type = _dzn_type(val)
stmt.append('{}: '.format(val_type))
stmt.append(name)
if assign:
val_str = val2dzn(val, wrap=wrap)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt) | python | def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
"""Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
val_type = _dzn_type(val)
stmt.append('{}: '.format(val_type))
stmt.append(name)
if assign:
val_str = val2dzn(val, wrap=wrap)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt) | [
"def",
"stmt2dzn",
"(",
"name",
",",
"val",
",",
"declare",
"=",
"True",
",",
"assign",
"=",
"True",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"not",
"(",
"declare",
"or",
"assign",
")",
":",
"raise",
"ValueError",
"(",
"'The statement must be a declaration or an assignment.'",
")",
"stmt",
"=",
"[",
"]",
"if",
"declare",
":",
"val_type",
"=",
"_dzn_type",
"(",
"val",
")",
"stmt",
".",
"append",
"(",
"'{}: '",
".",
"format",
"(",
"val_type",
")",
")",
"stmt",
".",
"append",
"(",
"name",
")",
"if",
"assign",
":",
"val_str",
"=",
"val2dzn",
"(",
"val",
",",
"wrap",
"=",
"wrap",
")",
"stmt",
".",
"append",
"(",
"' = {}'",
".",
"format",
"(",
"val_str",
")",
")",
"stmt",
".",
"append",
"(",
"';'",
")",
"return",
"''",
".",
"join",
"(",
"stmt",
")"
]
| Returns a dzn statement declaring and assigning the given value.
Parameters
----------
val
The value to serialize.
declare : bool
Whether to include the declaration of the variable in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the value in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized value.
Returns
-------
str
The serialized dzn representation of the value. | [
"Returns",
"a",
"dzn",
"statement",
"declaring",
"and",
"assigning",
"the",
"given",
"value",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L250-L285 | train |
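A sketch of producing whole statements; the inferred type in the first comment is an assumption about the internal ``_dzn_type`` helper.

from pymzn.dzn.marsh import stmt2dzn

print(stmt2dzn('n', 5))                  # e.g. 'int: n = 5;' (declare + assign)
print(stmt2dzn('n', 5, declare=False))   # 'n = 5;' (assignment only)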
paolodragone/pymzn | pymzn/dzn/marsh.py | stmt2enum | def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
"""Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declaration keyword in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
stmt.append('enum ')
stmt.append(enum_type.__name__)
if assign:
val_str = []
for v in list(enum_type):
val_str.append(v.name)
val_str = ''.join(['{', ','.join(val_str), '}'])
if wrap:
wrapper = _get_wrapper()
val_str = wrapper.fill(val_str)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt) | python | def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
"""Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declaration keyword in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum.
"""
if not (declare or assign):
raise ValueError(
'The statement must be a declaration or an assignment.'
)
stmt = []
if declare:
stmt.append('enum ')
stmt.append(enum_type.__name__)
if assign:
val_str = []
for v in list(enum_type):
val_str.append(v.name)
val_str = ''.join(['{', ','.join(val_str), '}'])
if wrap:
wrapper = _get_wrapper()
val_str = wrapper.fill(val_str)
stmt.append(' = {}'.format(val_str))
stmt.append(';')
return ''.join(stmt) | [
"def",
"stmt2enum",
"(",
"enum_type",
",",
"declare",
"=",
"True",
",",
"assign",
"=",
"True",
",",
"wrap",
"=",
"True",
")",
":",
"if",
"not",
"(",
"declare",
"or",
"assign",
")",
":",
"raise",
"ValueError",
"(",
"'The statement must be a declaration or an assignment.'",
")",
"stmt",
"=",
"[",
"]",
"if",
"declare",
":",
"stmt",
".",
"append",
"(",
"'enum '",
")",
"stmt",
".",
"append",
"(",
"enum_type",
".",
"__name__",
")",
"if",
"assign",
":",
"val_str",
"=",
"[",
"]",
"for",
"v",
"in",
"list",
"(",
"enum_type",
")",
":",
"val_str",
".",
"append",
"(",
"v",
".",
"name",
")",
"val_str",
"=",
"''",
".",
"join",
"(",
"[",
"'{'",
",",
"','",
".",
"join",
"(",
"val_str",
")",
",",
"'}'",
"]",
")",
"if",
"wrap",
":",
"wrapper",
"=",
"_get_wrapper",
"(",
")",
"val_str",
"=",
"wrapper",
".",
"fill",
"(",
"val_str",
")",
"stmt",
".",
"append",
"(",
"' = {}'",
".",
"format",
"(",
"val_str",
")",
")",
"stmt",
".",
"append",
"(",
"';'",
")",
"return",
"''",
".",
"join",
"(",
"stmt",
")"
]
| Returns a dzn enum declaration from an enum type.
Parameters
----------
enum_type : Enum
The enum to serialize.
declare : bool
Whether to include the ``enum`` declaration keyword in the statement or
just the assignment.
assign : bool
Whether to include the assignment of the enum in the statement or just
the declaration.
wrap : bool
Whether to wrap the serialized enum.
Returns
-------
str
The serialized dzn representation of the enum. | [
"Returns",
"a",
"dzn",
"enum",
"declaration",
"from",
"an",
"enum",
"type",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L288-L331 | train |
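A sketch of the enum counterpart, with a throwaway ``Enum``; the exact string follows directly from the joining logic above.

from enum import Enum
from pymzn.dzn.marsh import stmt2enum

Color = Enum('Color', ['RED', 'GREEN', 'BLUE'])
print(stmt2enum(Color))  # -> 'enum Color = {RED,GREEN,BLUE};'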
paolodragone/pymzn | pymzn/dzn/marsh.py | dict2dzn | def dict2dzn(
objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
"""Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written to a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
"""
log = logging.getLogger(__name__)
vals = []
enums = set()
for key, val in objs.items():
if _is_enum(val) and declare_enums:
enum_type = type(val)
enum_name = enum_type.__name__
if enum_name not in enums:
enum_stmt = stmt2enum(
enum_type, declare=declare, assign=assign, wrap=wrap
)
vals.append(enum_stmt)
enums.add(enum_name)
stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
vals.append(stmt)
if fout:
log.debug('Writing file: {}'.format(fout))
with open(fout, 'w') as f:
for val in vals:
f.write('{}\n\n'.format(val))
return vals | python | def dict2dzn(
objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
"""Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written to a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects.
"""
log = logging.getLogger(__name__)
vals = []
enums = set()
for key, val in objs.items():
if _is_enum(val) and declare_enums:
enum_type = type(val)
enum_name = enum_type.__name__
if enum_name not in enums:
enum_stmt = stmt2enum(
enum_type, declare=declare, assign=assign, wrap=wrap
)
vals.append(enum_stmt)
enums.add(enum_name)
stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
vals.append(stmt)
if fout:
log.debug('Writing file: {}'.format(fout))
with open(fout, 'w') as f:
for val in vals:
f.write('{}\n\n'.format(val))
return vals | [
"def",
"dict2dzn",
"(",
"objs",
",",
"declare",
"=",
"False",
",",
"assign",
"=",
"True",
",",
"declare_enums",
"=",
"True",
",",
"wrap",
"=",
"True",
",",
"fout",
"=",
"None",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"vals",
"=",
"[",
"]",
"enums",
"=",
"set",
"(",
")",
"for",
"key",
",",
"val",
"in",
"objs",
".",
"items",
"(",
")",
":",
"if",
"_is_enum",
"(",
"val",
")",
"and",
"declare_enums",
":",
"enum_type",
"=",
"type",
"(",
"val",
")",
"enum_name",
"=",
"enum_type",
".",
"__name__",
"if",
"enum_name",
"not",
"in",
"enums",
":",
"enum_stmt",
"=",
"stmt2enum",
"(",
"enum_type",
",",
"declare",
"=",
"declare",
",",
"assign",
"=",
"assign",
",",
"wrap",
"=",
"wrap",
")",
"vals",
".",
"append",
"(",
"enum_stmt",
")",
"enums",
".",
"add",
"(",
"enum_name",
")",
"stmt",
"=",
"stmt2dzn",
"(",
"key",
",",
"val",
",",
"declare",
"=",
"declare",
",",
"assign",
"=",
"assign",
",",
"wrap",
"=",
"wrap",
")",
"vals",
".",
"append",
"(",
"stmt",
")",
"if",
"fout",
":",
"log",
".",
"debug",
"(",
"'Writing file: {}'",
".",
"format",
"(",
"fout",
")",
")",
"with",
"open",
"(",
"fout",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"val",
"in",
"vals",
":",
"f",
".",
"write",
"(",
"'{}\\n\\n'",
".",
"format",
"(",
"val",
")",
")",
"return",
"vals"
]
| Serializes the objects in input and produces a list of strings encoding
them into dzn format. Optionally, the produced dzn is written to a file.
Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
``list`` or ``dict``. List and dict are serialized into dzn
(multi-dimensional) arrays. The key-set of a dict is used as index-set of
dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.
Parameters
----------
objs : dict
A dictionary containing the objects to serialize, the keys are the names
of the variables.
declare : bool
Whether to include the declaration of the variable in the statements or
just the assignment. Default is ``False``.
assign : bool
Whether to include assignment of the value in the statements or just the
declaration.
declare_enums : bool
Whether to declare the enums found as types of the objects to serialize.
Default is ``True``.
wrap : bool
Whether to wrap the serialized values.
fout : str
Path to the output file, if None no output file is written.
Returns
-------
list
List of strings containing the dzn-encoded objects. | [
"Serializes",
"the",
"objects",
"in",
"input",
"and",
"produces",
"a",
"list",
"of",
"strings",
"encoding",
"them",
"into",
"dzn",
"format",
".",
"Optionally",
"the",
"produced",
"dzn",
"is",
"written",
"on",
"a",
"file",
"."
]
| 35b04cfb244918551649b9bb8a0ab65d37c31fe4 | https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/dzn/marsh.py#L334-L391 | train |
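The dictionary-level entry point ties the previous helpers together; ``pymzn.dict2dzn`` is the export name this file's own docstrings use, and the output path below is hypothetical.

import pymzn

stmts = pymzn.dict2dzn({'n': 3, 'xs': [1, 2, 3]})
print(stmts)  # e.g. ['n = 3;', 'xs = [1, 2, 3];']

# Passing fout also writes the statements to a dzn file on disk.
pymzn.dict2dzn({'n': 3}, fout='data.dzn')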
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.async_or_eager | def async_or_eager(self, **options):
"""
Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors:
return self.apply(args, kwargs, **options) | python | def async_or_eager(self, **options):
"""
Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors:
return self.apply(args, kwargs, **options) | [
"def",
"async_or_eager",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"args",
"=",
"options",
".",
"pop",
"(",
"\"args\"",
",",
"None",
")",
"kwargs",
"=",
"options",
".",
"pop",
"(",
"\"kwargs\"",
",",
"None",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"return",
"self",
".",
"apply_async",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")",
"except",
"possible_broker_errors",
":",
"return",
"self",
".",
"apply",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")"
]
| Attempt to call self.apply_async, or if that fails because of a problem
with the broker, run the task eagerly and return an EagerResult. | [
"Attempt",
"to",
"call",
"self",
".",
"apply_async",
"or",
"if",
"that",
"fails",
"because",
"of",
"a",
"problem",
"with",
"the",
"broker",
"run",
"the",
"task",
"eagerly",
"and",
"return",
"an",
"EagerResult",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L90-L101 | train |
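The broker-fallback calls are meant to be used on a ``JobtasticTask`` subclass. A minimal sketch, assuming the conventional ``calculate_result`` hook and the required class attributes validated later in this file; the task body and timings are illustrative.

from jobtastic import JobtasticTask

class AddTask(JobtasticTask):
    # Kwargs that identify "the same job" for caching and herd
    # avoidance, paired with functions that turn them into strings.
    significant_kwargs = [('a', str), ('b', str)]
    herd_avoidance_timeout = 60  # seconds

    def calculate_result(self, a, b, **kwargs):
        return a + b

# Runs through the broker when it is reachable, eagerly otherwise.
result = AddTask.delay_or_eager(a=1, b=2)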
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.async_or_fail | def async_or_fail(self, **options):
"""
Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors as e:
return self.simulate_async_error(e) | python | def async_or_fail(self, **options):
"""
Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface.
"""
args = options.pop("args", None)
kwargs = options.pop("kwargs", None)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args, kwargs, **options)
except possible_broker_errors as e:
return self.simulate_async_error(e) | [
"def",
"async_or_fail",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"args",
"=",
"options",
".",
"pop",
"(",
"\"args\"",
",",
"None",
")",
"kwargs",
"=",
"options",
".",
"pop",
"(",
"\"kwargs\"",
",",
"None",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"return",
"self",
".",
"apply_async",
"(",
"args",
",",
"kwargs",
",",
"*",
"*",
"options",
")",
"except",
"possible_broker_errors",
"as",
"e",
":",
"return",
"self",
".",
"simulate_async_error",
"(",
"e",
")"
]
| Attempt to call self.apply_async, but if that fails with an exception,
we fake the task completion using the exception as the result. This
allows us to seamlessly handle errors on task creation the same way we
handle errors when a task runs, simplifying the user interface. | [
"Attempt",
"to",
"call",
"self",
".",
"apply_async",
"but",
"if",
"that",
"fails",
"with",
"an",
"exception",
"we",
"fake",
"the",
"task",
"completion",
"using",
"the",
"exception",
"as",
"the",
"result",
".",
"This",
"allows",
"us",
"to",
"seamlessly",
"handle",
"errors",
"on",
"task",
"creation",
"the",
"same",
"way",
"we",
"handle",
"errors",
"when",
"a",
"task",
"runs",
"simplifying",
"the",
"user",
"interface",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L104-L117 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.delay_or_eager | def delay_or_eager(self, *args, **kwargs):
"""
Wrap async_or_eager with a convenience signature like delay
"""
return self.async_or_eager(args=args, kwargs=kwargs) | python | def delay_or_eager(self, *args, **kwargs):
"""
Wrap async_or_eager with a convenience signature like delay
"""
return self.async_or_eager(args=args, kwargs=kwargs) | [
"def",
"delay_or_eager",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"async_or_eager",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")"
]
| Wrap async_or_eager with a convenience signature like delay | [
"Wrap",
"async_or_eager",
"with",
"a",
"convenience",
"signiture",
"like",
"delay"
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L120-L124 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.delay_or_run | def delay_or_run(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed and we had to resort to ``self.run``.
"""
warnings.warn(
"delay_or_run is deprecated. Please use delay_or_eager",
DeprecationWarning,
)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
result = self.apply_async(args=args, kwargs=kwargs)
required_fallback = False
except possible_broker_errors:
result = self().run(*args, **kwargs)
required_fallback = True
return result, required_fallback | python | def delay_or_run(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed and we had to resort to ``self.run``.
"""
warnings.warn(
"delay_or_run is deprecated. Please use delay_or_eager",
DeprecationWarning,
)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
result = self.apply_async(args=args, kwargs=kwargs)
required_fallback = False
except possible_broker_errors:
result = self().run(*args, **kwargs)
required_fallback = True
return result, required_fallback | [
"def",
"delay_or_run",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"delay_or_run is deprecated. Please use delay_or_eager\"",
",",
"DeprecationWarning",
",",
")",
"possible_broker_errors",
"=",
"self",
".",
"_get_possible_broker_errors_tuple",
"(",
")",
"try",
":",
"result",
"=",
"self",
".",
"apply_async",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"required_fallback",
"=",
"False",
"except",
"possible_broker_errors",
":",
"result",
"=",
"self",
"(",
")",
".",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"required_fallback",
"=",
"True",
"return",
"result",
",",
"required_fallback"
]
| Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed and we had to resort to ``self.run``.
"Attempt",
"to",
"call",
"self",
".",
"delay",
"or",
"if",
"that",
"fails",
"call",
"self",
".",
"run",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L127-L146 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.delay_or_fail | def delay_or_fail(self, *args, **kwargs):
"""
Wrap async_or_fail with a convenience signature like delay
"""
return self.async_or_fail(args=args, kwargs=kwargs) | python | def delay_or_fail(self, *args, **kwargs):
"""
Wrap async_or_fail with a convenience signature like delay
"""
return self.async_or_fail(args=args, kwargs=kwargs) | [
"def",
"delay_or_fail",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"async_or_fail",
"(",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")"
]
| Wrap async_or_fail with a convenience signature like delay | [
"Wrap",
"async_or_fail",
"with",
"a",
"convenience",
"signiture",
"like",
"delay"
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L149-L153 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.simulate_async_error | def simulate_async_error(self, exception):
"""
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
"""
task_id = gen_unique_id()
async_result = self.AsyncResult(task_id)
einfo = ExceptionInfo(sys.exc_info())
async_result.backend.mark_as_failure(
task_id,
exception,
traceback=einfo.traceback,
)
return async_result | python | def simulate_async_error(self, exception):
"""
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
"""
task_id = gen_unique_id()
async_result = self.AsyncResult(task_id)
einfo = ExceptionInfo(sys.exc_info())
async_result.backend.mark_as_failure(
task_id,
exception,
traceback=einfo.traceback,
)
return async_result | [
"def",
"simulate_async_error",
"(",
"self",
",",
"exception",
")",
":",
"task_id",
"=",
"gen_unique_id",
"(",
")",
"async_result",
"=",
"self",
".",
"AsyncResult",
"(",
"task_id",
")",
"einfo",
"=",
"ExceptionInfo",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"async_result",
".",
"backend",
".",
"mark_as_failure",
"(",
"task_id",
",",
"exception",
",",
"traceback",
"=",
"einfo",
".",
"traceback",
",",
")",
"return",
"async_result"
]
| Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error. | [
"Take",
"this",
"exception",
"and",
"store",
"it",
"as",
"an",
"error",
"in",
"the",
"result",
"backend",
".",
"This",
"unifies",
"the",
"handling",
"of",
"broker",
"-",
"connection",
"errors",
"with",
"any",
"other",
"type",
"of",
"error",
"that",
"might",
"occur",
"when",
"running",
"the",
"task",
".",
"So",
"the",
"same",
"error",
"-",
"handling",
"that",
"might",
"retry",
"a",
"task",
"or",
"display",
"a",
"useful",
"message",
"to",
"the",
"user",
"can",
"also",
"handle",
"this",
"error",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L178-L196 | train |
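Because the connection error is stored as a normal failure, callers can treat it like any other task error. A sketch, reusing the hypothetical ``AddTask`` class from the earlier jobtastic example:

from celery.states import FAILURE

# AddTask is the illustrative JobtasticTask subclass sketched earlier.
result = AddTask.delay_or_fail(a=1, b=2)
if result.state == FAILURE:
    # result.result holds the stored exception, e.g. the broker error.
    print('task failed or could not be queued:', result.result)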
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.calc_progress | def calc_progress(self, completed_count, total_count):
"""
Calculate the percentage progress and estimated remaining time based on
the current number of items completed out of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
"""
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s", time_spent)
if total_count == 0:
return 100, 1
completion_fraction = completed_count / total_count
if completion_fraction == 0:
completion_fraction = 1
total_time = 0
total_time = time_spent / completion_fraction
time_remaining = total_time - time_spent
completion_display = completion_fraction * 100
if completion_display == 100:
return 100, 1 # 1 second to finish up
return completion_display, time_remaining | python | def calc_progress(self, completed_count, total_count):
"""
Calculate the percentage progress and estimated remaining time based on
the current number of items completed out of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
"""
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s", time_spent)
if total_count == 0:
return 100, 1
completion_fraction = completed_count / total_count
if completion_fraction == 0:
completion_fraction = 1
total_time = 0
total_time = time_spent / completion_fraction
time_remaining = total_time - time_spent
completion_display = completion_fraction * 100
if completion_display == 100:
return 100, 1 # 1 second to finish up
return completion_display, time_remaining | [
"def",
"calc_progress",
"(",
"self",
",",
"completed_count",
",",
"total_count",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"calc_progress(%s, %s)\"",
",",
"completed_count",
",",
"total_count",
",",
")",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"time_spent",
"=",
"current_time",
"-",
"self",
".",
"start_time",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Progress time spent: %s\"",
",",
"time_spent",
")",
"if",
"total_count",
"==",
"0",
":",
"return",
"100",
",",
"1",
"completion_fraction",
"=",
"completed_count",
"/",
"total_count",
"if",
"completion_fraction",
"==",
"0",
":",
"completion_fraction",
"=",
"1",
"total_time",
"=",
"0",
"total_time",
"=",
"time_spent",
"/",
"completion_fraction",
"time_remaining",
"=",
"total_time",
"-",
"time_spent",
"completion_display",
"=",
"completion_fraction",
"*",
"100",
"if",
"completion_display",
"==",
"100",
":",
"return",
"100",
",",
"1",
"# 1 second to finish up",
"return",
"completion_display",
",",
"time_remaining"
]
| Calculate the percentage progress and estimated remaining time based on
the current number of items completed out of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``. | [
"Calculate",
"the",
"percentage",
"progress",
"and",
"estimated",
"remaining",
"time",
"based",
"on",
"the",
"current",
"number",
"of",
"items",
"completed",
"of",
"the",
"total",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L246-L278 | train |
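The estimate is simple proportional extrapolation; a worked example with 25 of 100 items done after 30 seconds:

completed, total, time_spent = 25, 100, 30.0

fraction = completed / total         # 0.25 complete
total_time = time_spent / fraction   # 120.0s projected for the whole job
remaining = total_time - time_spent  # 90.0s still to go
percent = fraction * 100             # 25.0

print(percent, remaining)  # -> 25.0 90.0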
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.update_progress | def update_progress(
self,
completed_count,
total_count,
update_frequency=1,
):
"""
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
backend at most once every ``N`` increments of ``completed_count``.
"""
if completed_count - self._last_update_count < update_frequency:
# We've updated the progress too recently. Don't stress out the
# result backend
return
# Store progress for display
progress_percent, time_remaining = self.calc_progress(
completed_count, total_count)
self.logger.debug(
"Updating progress: %s percent, %s remaining",
progress_percent,
time_remaining)
if self.request.id:
self._last_update_count = completed_count
self.update_state(None, PROGRESS, {
"progress_percent": progress_percent,
"time_remaining": time_remaining,
}) | python | def update_progress(
self,
completed_count,
total_count,
update_frequency=1,
):
"""
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
backend at most once every ``N`` increments of ``completed_count``.
"""
if completed_count - self._last_update_count < update_frequency:
# We've updated the progress too recently. Don't stress out the
# result backend
return
# Store progress for display
progress_percent, time_remaining = self.calc_progress(
completed_count, total_count)
self.logger.debug(
"Updating progress: %s percent, %s remaining",
progress_percent,
time_remaining)
if self.request.id:
self._last_update_count = completed_count
self.update_state(None, PROGRESS, {
"progress_percent": progress_percent,
"time_remaining": time_remaining,
}) | [
"def",
"update_progress",
"(",
"self",
",",
"completed_count",
",",
"total_count",
",",
"update_frequency",
"=",
"1",
",",
")",
":",
"if",
"completed_count",
"-",
"self",
".",
"_last_update_count",
"<",
"update_frequency",
":",
"# We've updated the progress too recently. Don't stress out the",
"# result backend",
"return",
"# Store progress for display",
"progress_percent",
",",
"time_remaining",
"=",
"self",
".",
"calc_progress",
"(",
"completed_count",
",",
"total_count",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Updating progress: %s percent, %s remaining\"",
",",
"progress_percent",
",",
"time_remaining",
")",
"if",
"self",
".",
"request",
".",
"id",
":",
"self",
".",
"_last_update_count",
"=",
"completed_count",
"self",
".",
"update_state",
"(",
"None",
",",
"PROGRESS",
",",
"{",
"\"progress_percent\"",
":",
"progress_percent",
",",
"\"time_remaining\"",
":",
"time_remaining",
",",
"}",
")"
]
| Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
backend at most once every ``N`` increments of ``completed_count``.
"Update",
"the",
"task",
"backend",
"with",
"both",
"an",
"estimated",
"percentage",
"complete",
"and",
"number",
"of",
"seconds",
"remaining",
"until",
"completion",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L280-L311 | train |
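Inside a task body, progress updates are throttled by ``update_frequency``; a sketch with an illustrative workload, assuming the same ``calculate_result`` convention as above:

from jobtastic import JobtasticTask

class CrunchTask(JobtasticTask):
    significant_kwargs = [('n', str)]
    herd_avoidance_timeout = 300

    def calculate_result(self, n, **kwargs):
        total = 0
        for i in range(1, n + 1):
            total += i  # stand-in for real per-item work
            # Hit the result backend at most once every 10 items.
            self.update_progress(i, n, update_frequency=10)
        return total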
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask._validate_required_class_vars | def _validate_required_class_vars(self):
"""
Ensure that this subclass has defined all of the required class
variables.
"""
required_members = (
'significant_kwargs',
'herd_avoidance_timeout',
)
for required_member in required_members:
if not hasattr(self, required_member):
raise Exception(
"JobtasticTask's must define a %s" % required_member) | python | def _validate_required_class_vars(self):
"""
Ensure that this subclass has defined all of the required class
variables.
"""
required_members = (
'significant_kwargs',
'herd_avoidance_timeout',
)
for required_member in required_members:
if not hasattr(self, required_member):
raise Exception(
"JobtasticTask's must define a %s" % required_member) | [
"def",
"_validate_required_class_vars",
"(",
"self",
")",
":",
"required_members",
"=",
"(",
"'significant_kwargs'",
",",
"'herd_avoidance_timeout'",
",",
")",
"for",
"required_member",
"in",
"required_members",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"required_member",
")",
":",
"raise",
"Exception",
"(",
"\"JobtasticTask's must define a %s\"",
"%",
"required_member",
")"
]
| Ensure that this subclass has defined all of the required class
variables. | [
"Ensure",
"that",
"this",
"subclass",
"has",
"defined",
"all",
"of",
"the",
"required",
"class",
"variables",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L378-L390 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask.on_success | def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval) | python | def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval) | [
"def",
"on_success",
"(",
"self",
",",
"retval",
",",
"task_id",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"self",
".",
"request",
".",
"is_eager",
":",
"# Store the result because celery wouldn't otherwise",
"self",
".",
"update_state",
"(",
"task_id",
",",
"SUCCESS",
",",
"retval",
")"
]
| Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results. | [
"Store",
"results",
"in",
"the",
"backend",
"even",
"if",
"we",
"re",
"always",
"eager",
".",
"This",
"ensures",
"the",
"delay_or_run",
"calls",
"always",
"at",
"least",
"have",
"results",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L392-L399 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask._get_cache | def _get_cache(self):
"""
Return the cache to use for thundering herd protection, etc.
"""
if not self._cache:
self._cache = get_cache(self.app)
return self._cache | python | def _get_cache(self):
"""
Return the cache to use for thundering herd protection, etc.
"""
if not self._cache:
self._cache = get_cache(self.app)
return self._cache | [
"def",
"_get_cache",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cache",
":",
"self",
".",
"_cache",
"=",
"get_cache",
"(",
"self",
".",
"app",
")",
"return",
"self",
".",
"_cache"
]
| Return the cache to use for thundering herd protection, etc. | [
"Return",
"the",
"cache",
"to",
"use",
"for",
"thundering",
"herd",
"protection",
"etc",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L405-L411 | train |
PolicyStat/jobtastic | jobtastic/task.py | JobtasticTask._get_cache_key | def _get_cache_key(self, **kwargs):
"""
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
"""
m = md5()
for significant_kwarg in self.significant_kwargs:
key, to_str = significant_kwarg
try:
m.update(to_str(kwargs[key]))
except (TypeError, UnicodeEncodeError):
# Python 3.x strings aren't accepted by hash.update().
# String should be byte-encoded first.
m.update(to_str(kwargs[key]).encode('utf-8'))
if hasattr(self, 'cache_prefix'):
cache_prefix = self.cache_prefix
else:
cache_prefix = '%s.%s' % (self.__module__, self.__name__)
return '%s:%s' % (cache_prefix, m.hexdigest()) | python | def _get_cache_key(self, **kwargs):
"""
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
"""
m = md5()
for significant_kwarg in self.significant_kwargs:
key, to_str = significant_kwarg
try:
m.update(to_str(kwargs[key]))
except (TypeError, UnicodeEncodeError):
# Python 3.x strings aren't accepted by hash.update().
# String should be byte-encoded first.
m.update(to_str(kwargs[key]).encode('utf-8'))
if hasattr(self, 'cache_prefix'):
cache_prefix = self.cache_prefix
else:
cache_prefix = '%s.%s' % (self.__module__, self.__name__)
return '%s:%s' % (cache_prefix, m.hexdigest()) | [
"def",
"_get_cache_key",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
"=",
"md5",
"(",
")",
"for",
"significant_kwarg",
"in",
"self",
".",
"significant_kwargs",
":",
"key",
",",
"to_str",
"=",
"significant_kwarg",
"try",
":",
"m",
".",
"update",
"(",
"to_str",
"(",
"kwargs",
"[",
"key",
"]",
")",
")",
"except",
"(",
"TypeError",
",",
"UnicodeEncodeError",
")",
":",
"# Python 3.x strings aren't accepted by hash.update().",
"# String should be byte-encoded first.",
"m",
".",
"update",
"(",
"to_str",
"(",
"kwargs",
"[",
"key",
"]",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"'cache_prefix'",
")",
":",
"cache_prefix",
"=",
"self",
".",
"cache_prefix",
"else",
":",
"cache_prefix",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"__module__",
",",
"self",
".",
"__name__",
")",
"return",
"'%s:%s'",
"%",
"(",
"cache_prefix",
",",
"m",
".",
"hexdigest",
"(",
")",
")"
]
| Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method. | [
"Take",
"this",
"task",
"s",
"configured",
"significant_kwargs",
"and",
"build",
"a",
"hash",
"that",
"all",
"equivalent",
"task",
"calls",
"will",
"match",
"."
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/task.py#L426-L450 | train |
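A standalone sketch of the key scheme above, mirroring _get_cache_key outside the Task class. The kwarg names and the 'myapp.tasks.ExportTask' prefix are illustrative assumptions.

from hashlib import md5

significant_kwargs = [('user_id', str), ('year', str)]
kwargs = {'user_id': 42, 'year': 2020, 'verbose': True}  # 'verbose' is ignored

m = md5()
for key, to_str in significant_kwargs:
    # Byte-encode for Python 3, as the except branch above does.
    m.update(to_str(kwargs[key]).encode('utf-8'))
cache_key = 'myapp.tasks.ExportTask:%s' % m.hexdigest()
# Equivalent calls (same significant kwargs) hash to the same key.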
PolicyStat/jobtastic | jobtastic/cache/__init__.py | get_cache | def get_cache(app):
"""
Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail
"""
jobtastic_cache_setting = app.conf.get('JOBTASTIC_CACHE')
if isinstance(jobtastic_cache_setting, BaseCache):
return jobtastic_cache_setting
if 'Django' in CACHES:
if jobtastic_cache_setting:
try:
return WrappedCache(get_django_cache(jobtastic_cache_setting))
except InvalidCacheBackendError:
pass
else:
return WrappedCache(get_django_cache('default'))
if 'Werkzeug' in CACHES:
if jobtastic_cache_setting:
backend, url = get_backend_by_url(jobtastic_cache_setting)
backend = backend(app=app, url=url)
else:
backend = app.backend
if isinstance(backend, CacheBackend):
return WrappedCache(MemcachedCache(backend.client))
elif isinstance(backend, RedisBackend):
return WrappedCache(RedisCache(backend.client))
# Give up
raise RuntimeError('Cannot find a suitable cache for Jobtastic') | python | def get_cache(app):
"""
Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail
"""
jobtastic_cache_setting = app.conf.get('JOBTASTIC_CACHE')
if isinstance(jobtastic_cache_setting, BaseCache):
return jobtastic_cache_setting
if 'Django' in CACHES:
if jobtastic_cache_setting:
try:
return WrappedCache(get_django_cache(jobtastic_cache_setting))
except InvalidCacheBackendError:
pass
else:
return WrappedCache(get_django_cache('default'))
if 'Werkzeug' in CACHES:
if jobtastic_cache_setting:
backend, url = get_backend_by_url(jobtastic_cache_setting)
backend = backend(app=app, url=url)
else:
backend = app.backend
if isinstance(backend, CacheBackend):
return WrappedCache(MemcachedCache(backend.client))
elif isinstance(backend, RedisBackend):
return WrappedCache(RedisCache(backend.client))
# Give up
raise RuntimeError('Cannot find a suitable cache for Jobtastic') | [
"def",
"get_cache",
"(",
"app",
")",
":",
"jobtastic_cache_setting",
"=",
"app",
".",
"conf",
".",
"get",
"(",
"'JOBTASTIC_CACHE'",
")",
"if",
"isinstance",
"(",
"jobtastic_cache_setting",
",",
"BaseCache",
")",
":",
"return",
"jobtastic_cache_setting",
"if",
"'Django'",
"in",
"CACHES",
":",
"if",
"jobtastic_cache_setting",
":",
"try",
":",
"return",
"WrappedCache",
"(",
"get_django_cache",
"(",
"jobtastic_cache_setting",
")",
")",
"except",
"InvalidCacheBackendError",
":",
"pass",
"else",
":",
"return",
"WrappedCache",
"(",
"get_django_cache",
"(",
"'default'",
")",
")",
"if",
"'Werkzeug'",
"in",
"CACHES",
":",
"if",
"jobtastic_cache_setting",
":",
"backend",
",",
"url",
"=",
"get_backend_by_url",
"(",
"jobtastic_cache_setting",
")",
"backend",
"=",
"backend",
"(",
"app",
"=",
"app",
",",
"url",
"=",
"url",
")",
"else",
":",
"backend",
"=",
"app",
".",
"backend",
"if",
"isinstance",
"(",
"backend",
",",
"CacheBackend",
")",
":",
"return",
"WrappedCache",
"(",
"MemcachedCache",
"(",
"backend",
".",
"client",
")",
")",
"elif",
"isinstance",
"(",
"backend",
",",
"RedisBackend",
")",
":",
"return",
"WrappedCache",
"(",
"RedisCache",
"(",
"backend",
".",
"client",
")",
")",
"# Give up",
"raise",
"RuntimeError",
"(",
"'Cannot find a suitable cache for Jobtastic'",
")"
]
| Attempt to find a valid cache from the Celery configuration
If the setting is a valid cache, just use it.
Otherwise, if Django is installed, then:
If the setting is a valid Django cache entry, then use that.
If the setting is empty use the default cache
Otherwise, if Werkzeug is installed, then:
If the setting is a valid Celery Memcache or Redis Backend, then use
that.
If the setting is empty and the default Celery Result Backend is
Memcache or Redis, then use that
Otherwise fail | [
"Attempt",
"to",
"find",
"a",
"valid",
"cache",
"from",
"the",
"Celery",
"configuration"
]
| 19cd3137ebf46877cee1ee5155d318bb6261ee1c | https://github.com/PolicyStat/jobtastic/blob/19cd3137ebf46877cee1ee5155d318bb6261ee1c/jobtastic/cache/__init__.py#L29-L70 | train |
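A hedged configuration sketch for the resolution order above. The app name and the 'jobtastic' cache alias are illustrative; the alias must name an existing Django CACHES entry for the Django branch to apply.

from celery import Celery

app = Celery('myapp')
app.conf.JOBTASTIC_CACHE = 'jobtastic'  # assumed Django CACHES alias
# get_cache(app) then wraps that Django cache in WrappedCache; with the
# setting unset it falls back to the 'default' Django cache, and with only
# Werkzeug available it derives a cache from the Celery result backend.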
dodger487/dplython | dplython/dplython.py | select | def select(*args):
"""Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]`
"""
def select_columns(df, args):
columns = [column._name for column in args]
if df._grouped_on:
for col in df._grouped_on[::-1]:
if col not in columns:
columns.insert(0, col)
return columns
return lambda df: df[select_columns(df, args)] | python | def select(*args):
"""Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]`
"""
def select_columns(df, args):
columns = [column._name for column in args]
if df._grouped_on:
for col in df._grouped_on[::-1]:
if col not in columns:
columns.insert(0, col)
return columns
return lambda df: df[select_columns(df, args)] | [
"def",
"select",
"(",
"*",
"args",
")",
":",
"def",
"select_columns",
"(",
"df",
",",
"args",
")",
":",
"columns",
"=",
"[",
"column",
".",
"_name",
"for",
"column",
"in",
"args",
"]",
"if",
"df",
".",
"_grouped_on",
":",
"for",
"col",
"in",
"df",
".",
"_grouped_on",
"[",
":",
":",
"-",
"1",
"]",
":",
"if",
"col",
"not",
"in",
"columns",
":",
"columns",
".",
"insert",
"(",
"0",
",",
"col",
")",
"return",
"columns",
"return",
"lambda",
"df",
":",
"df",
"[",
"select_columns",
"(",
"df",
",",
"args",
")",
"]"
]
| Select specific columns from DataFrame.
Output will be DplyFrame type. Order of columns will be the same as input into
select.
>>> diamonds >> select(X.color, X.carat) >> head(3)
Out:
color carat
0 E 0.23
1 E 0.21
2 E 0.23
Grouping variables are implied in selection.
>>> df >> group_by(X.a, X.b) >> select(X.c)
returns a dataframe like `df[[X.a, X.b, X.c]]` with the variables appearing in
grouped order before the selected column(s), unless a grouped variable is
explicitly selected
>>> df >> group_by(X.a, X.b) >> select(X.c, X.b)
returns a dataframe like `df[[X.a, X.c, X.b]]` | [
"Select",
"specific",
"columns",
"from",
"DataFrame",
"."
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L203-L232 | train |
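A short sketch of the grouped-selection rule documented above, using a toy frame with illustrative column names.

import pandas as pd
from dplython import DplyFrame, X, group_by, select

df = DplyFrame(pd.DataFrame({'a': [1, 1], 'b': [2, 3], 'c': [4, 5]}))
out = df >> group_by(X.a, X.b) >> select(X.c)        # columns: a, b, c
out2 = df >> group_by(X.a, X.b) >> select(X.c, X.b)  # columns: a, c, b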
dodger487/dplython | dplython/dplython.py | arrange | def arrange(*args):
"""Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903
"""
names = [column._name for column in args]
def f(df):
sortby_df = df >> mutate(*args)
index = sortby_df.sort_values([str(arg) for arg in args]).index
return df.loc[index]
return f | python | def arrange(*args):
"""Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903
"""
names = [column._name for column in args]
def f(df):
sortby_df = df >> mutate(*args)
index = sortby_df.sort_values([str(arg) for arg in args]).index
return df.loc[index]
return f | [
"def",
"arrange",
"(",
"*",
"args",
")",
":",
"names",
"=",
"[",
"column",
".",
"_name",
"for",
"column",
"in",
"args",
"]",
"def",
"f",
"(",
"df",
")",
":",
"sortby_df",
"=",
"df",
">>",
"mutate",
"(",
"*",
"args",
")",
"index",
"=",
"sortby_df",
".",
"sort_values",
"(",
"[",
"str",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
")",
".",
"index",
"return",
"df",
".",
"loc",
"[",
"index",
"]",
"return",
"f"
]
| Sort DataFrame by the input column arguments.
>>> diamonds >> sample_n(5) >> arrange(X.price) >> select(X.depth, X.price)
Out:
depth price
28547 61.0 675
35132 59.1 889
42526 61.3 1323
3468 61.6 3392
23829 62.0 11903 | [
"Sort",
"DataFrame",
"by",
"the",
"input",
"column",
"arguments",
"."
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L377-L394 | train |
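A sorting sketch for the function above: arrange accepts computed expressions as well as bare columns, because it mutates before sorting. Toy data only.

import pandas as pd
from dplython import DplyFrame, X, arrange

df = DplyFrame(pd.DataFrame({'x': [3, 1, 2], 'y': [9, 8, 7]}))
by_col = df >> arrange(X.x)         # rows reordered so x is 1, 2, 3
by_expr = df >> arrange(X.x + X.y)  # sorts by a derived expression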
dodger487/dplython | dplython/dplython.py | rename | def rename(**kwargs):
"""Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name)
"""
def rename_columns(df):
column_assignments = {old_name_later._name: new_name
for new_name, old_name_later in kwargs.items()}
return df.rename(columns=column_assignments)
return rename_columns | python | def rename(**kwargs):
"""Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name)
"""
def rename_columns(df):
column_assignments = {old_name_later._name: new_name
for new_name, old_name_later in kwargs.items()}
return df.rename(columns=column_assignments)
return rename_columns | [
"def",
"rename",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"rename_columns",
"(",
"df",
")",
":",
"column_assignments",
"=",
"{",
"old_name_later",
".",
"_name",
":",
"new_name",
"for",
"new_name",
",",
"old_name_later",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"df",
".",
"rename",
"(",
"columns",
"=",
"column_assignments",
")",
"return",
"rename_columns"
]
| Rename one or more columns, leaving other columns unchanged
Example usage:
diamonds >> rename(new_name=old_name) | [
"Rename",
"one",
"or",
"more",
"columns",
"leaving",
"other",
"columns",
"unchanged"
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L453-L463 | train |
dodger487/dplython | dplython/dplython.py | transmute | def transmute(*args, **kwargs):
""" Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12
"""
mutate_dataframe_fn = mutate(*args, **dict(kwargs))
column_names_args = [str(arg) for arg in args]
column_names_kwargs = [name for name, _
in _dict_to_possibly_ordered_tuples(kwargs)]
column_names = column_names_args + column_names_kwargs
return lambda df: mutate_dataframe_fn(df)[column_names] | python | def transmute(*args, **kwargs):
""" Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12
"""
mutate_dataframe_fn = mutate(*args, **dict(kwargs))
column_names_args = [str(arg) for arg in args]
column_names_kwargs = [name for name, _
in _dict_to_possibly_ordered_tuples(kwargs)]
column_names = column_names_args + column_names_kwargs
return lambda df: mutate_dataframe_fn(df)[column_names] | [
"def",
"transmute",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"mutate_dateframe_fn",
"=",
"mutate",
"(",
"*",
"args",
",",
"*",
"*",
"dict",
"(",
"kwargs",
")",
")",
"column_names_args",
"=",
"[",
"str",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
"column_names_kwargs",
"=",
"[",
"name",
"for",
"name",
",",
"_",
"in",
"_dict_to_possibly_ordered_tuples",
"(",
"kwargs",
")",
"]",
"column_names",
"=",
"column_names_args",
"+",
"column_names_kwargs",
"return",
"lambda",
"df",
":",
"mutate_dateframe_fn",
"(",
"df",
")",
"[",
"column_names",
"]"
]
| Similar to `select` but allows mutation in column definitions.
In : (diamonds >>
head(3) >>
transmute(new_price=X.price * 2, x_plus_y=X.x + X.y))
Out:
new_price x_plus_y
0 652 7.93
1 652 7.73
2 654 8.12 | [
"Similar",
"to",
"select",
"but",
"allows",
"mutation",
"in",
"column",
"definitions",
"."
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L467-L484 | train |
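A sketch of transmute's keep-only-new-columns behavior on a toy frame; transmute is imported from the dplython.dplython module shown in the row, in case the package does not re-export it.

import pandas as pd
from dplython import DplyFrame, X
from dplython.dplython import transmute

df = DplyFrame(pd.DataFrame({'price': [100, 200],
                             'x': [1.0, 2.0], 'y': [3.0, 4.0]}))
out = df >> transmute(new_price=X.price * 2, x_plus_y=X.x + X.y)
# Only the newly defined columns survive; on older Pythons their relative
# order may vary because plain kwargs are unordered.
print(sorted(out.columns))  # ['new_price', 'x_plus_y']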
dodger487/dplython | dplython/dplython.py | get_join_cols | def get_join_cols(by_entry):
""" helper function used for joins
builds left and right join list for join function
"""
left_cols = []
right_cols = []
for col in by_entry:
if isinstance(col, str):
left_cols.append(col)
right_cols.append(col)
else:
left_cols.append(col[0])
right_cols.append(col[1])
return left_cols, right_cols | python | def get_join_cols(by_entry):
""" helper function used for joins
builds left and right join list for join function
"""
left_cols = []
right_cols = []
for col in by_entry:
if isinstance(col, str):
left_cols.append(col)
right_cols.append(col)
else:
left_cols.append(col[0])
right_cols.append(col[1])
return left_cols, right_cols | [
"def",
"get_join_cols",
"(",
"by_entry",
")",
":",
"left_cols",
"=",
"[",
"]",
"right_cols",
"=",
"[",
"]",
"for",
"col",
"in",
"by_entry",
":",
"if",
"isinstance",
"(",
"col",
",",
"str",
")",
":",
"left_cols",
".",
"append",
"(",
"col",
")",
"right_cols",
".",
"append",
"(",
"col",
")",
"else",
":",
"left_cols",
".",
"append",
"(",
"col",
"[",
"0",
"]",
")",
"right_cols",
".",
"append",
"(",
"col",
"[",
"1",
"]",
")",
"return",
"left_cols",
",",
"right_cols"
]
| helper function used for joins
builds left and right join list for join function | [
"helper",
"function",
"used",
"for",
"joins",
"builds",
"left",
"and",
"right",
"join",
"list",
"for",
"join",
"function"
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L504-L517 | train |
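A worked example of the `by` grammar above: bare strings join on the same name in both frames, while pairs map a left name to a right name. The column names are illustrative.

from dplython.dplython import get_join_cols

left_cols, right_cols = get_join_cols(['id', ('lhs_key', 'rhs_key')])
assert left_cols == ['id', 'lhs_key']
assert right_cols == ['id', 'rhs_key']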
dodger487/dplython | dplython/dplython.py | mutating_join | def mutating_join(*args, **kwargs):
""" generic function for mutating dplyr-style joins
"""
# candidate for improvement
left = args[0]
right = args[1]
if 'by' in kwargs:
left_cols, right_cols = get_join_cols(kwargs['by'])
else:
left_cols, right_cols = None, None
if 'suffixes' in kwargs:
dsuffixes = kwargs['suffixes']
else:
dsuffixes = ('_x', '_y')
if left._grouped_on:
outDf = (DplyFrame((left >> ungroup())
.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
.regroup(left._grouped_on))
else:
outDf = DplyFrame(left.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
return outDf | python | def mutating_join(*args, **kwargs):
""" generic function for mutating dplyr-style joins
"""
# candidate for improvement
left = args[0]
right = args[1]
if 'by' in kwargs:
left_cols, right_cols = get_join_cols(kwargs['by'])
else:
left_cols, right_cols = None, None
if 'suffixes' in kwargs:
dsuffixes = kwargs['suffixes']
else:
dsuffixes = ('_x', '_y')
if left._grouped_on:
outDf = (DplyFrame((left >> ungroup())
.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
.regroup(left._grouped_on))
else:
outDf = DplyFrame(left.merge(right, how=kwargs['how'], left_on=left_cols,
right_on=right_cols, suffixes=dsuffixes))
return outDf | [
"def",
"mutating_join",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# candidate for improvement",
"left",
"=",
"args",
"[",
"0",
"]",
"right",
"=",
"args",
"[",
"1",
"]",
"if",
"'by'",
"in",
"kwargs",
":",
"left_cols",
",",
"right_cols",
"=",
"get_join_cols",
"(",
"kwargs",
"[",
"'by'",
"]",
")",
"else",
":",
"left_cols",
",",
"right_cols",
"=",
"None",
",",
"None",
"if",
"'suffixes'",
"in",
"kwargs",
":",
"dsuffixes",
"=",
"kwargs",
"[",
"'suffixes'",
"]",
"else",
":",
"dsuffixes",
"=",
"(",
"'_x'",
",",
"'_y'",
")",
"if",
"left",
".",
"_grouped_on",
":",
"outDf",
"=",
"(",
"DplyFrame",
"(",
"(",
"left",
">>",
"ungroup",
"(",
")",
")",
".",
"merge",
"(",
"right",
",",
"how",
"=",
"kwargs",
"[",
"'how'",
"]",
",",
"left_on",
"=",
"left_cols",
",",
"right_on",
"=",
"right_cols",
",",
"suffixes",
"=",
"dsuffixes",
")",
")",
".",
"regroup",
"(",
"left",
".",
"_grouped_on",
")",
")",
"else",
":",
"outDf",
"=",
"DplyFrame",
"(",
"left",
".",
"merge",
"(",
"right",
",",
"how",
"=",
"kwargs",
"[",
"'how'",
"]",
",",
"left_on",
"=",
"left_cols",
",",
"right_on",
"=",
"right_cols",
",",
"suffixes",
"=",
"dsuffixes",
")",
")",
"return",
"outDf"
]
| generic function for mutating dplyr-style joins | [
"generic",
"function",
"for",
"mutating",
"dplyr",
"-",
"style",
"joins"
]
| 09c2a5f4ca67221b2a59928366ca8274357f7234 | https://github.com/dodger487/dplython/blob/09c2a5f4ca67221b2a59928366ca8274357f7234/dplython/dplython.py#L520-L542 | train |
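A hedged sketch of calling the generic join directly; dplython's public join wrappers build on this, and the frames below are toy data. mutating_join is imported from the dplython.dplython module shown in the row.

import pandas as pd
from dplython import DplyFrame
from dplython.dplython import mutating_join

left = DplyFrame(pd.DataFrame({'id': [1, 2], 'v': ['a', 'b']}))
right = DplyFrame(pd.DataFrame({'key': [1, 3], 'w': ['x', 'y']}))
out = mutating_join(left, right, how='left',
                    by=[('id', 'key')], suffixes=('_l', '_r'))
# Grouping on `left` would be dropped for the merge and reapplied afterwards.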
mher/chartkick.py | chartkick/ext.py | ChartExtension._chart_support | def _chart_support(self, name, data, caller, **kwargs):
"template chart support function"
id = 'chart-%s' % next(self.id)
name = self._chart_class_name(name)
options = dict(self.environment.options)
options.update(name=name, id=id)
# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys
if jinja2.__version__ >= '2.9':
kwargs = dict((k[4:], v) for (k, v) in kwargs.items())
else:
kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
if self._library is None:
self._library = self.load_library()
id = kwargs.get('id', '')
library = self._library.get(id, {})
# apply options from a tag
library.update(kwargs.get('library', {}))
# apply options from chartkick.json
kwargs.update(library=library)
options.update(kwargs)
return CHART_HTML.format(data=data, options=json.dumps(kwargs),
**options) | python | def _chart_support(self, name, data, caller, **kwargs):
"template chart support function"
id = 'chart-%s' % next(self.id)
name = self._chart_class_name(name)
options = dict(self.environment.options)
options.update(name=name, id=id)
# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys
if jinja2.__version__ >= '2.9':
kwargs = dict((k[4:], v) for (k, v) in kwargs.items())
else:
kwargs = dict((k[2:], v) for (k, v) in kwargs.items())
if self._library is None:
self._library = self.load_library()
id = kwargs.get('id', '')
library = self._library.get(id, {})
# apply options from a tag
library.update(kwargs.get('library', {}))
# apply options from chartkick.json
kwargs.update(library=library)
options.update(kwargs)
return CHART_HTML.format(data=data, options=json.dumps(kwargs),
**options) | [
"def",
"_chart_support",
"(",
"self",
",",
"name",
",",
"data",
",",
"caller",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"'chart-%s'",
"%",
"next",
"(",
"self",
".",
"id",
")",
"name",
"=",
"self",
".",
"_chart_class_name",
"(",
"name",
")",
"options",
"=",
"dict",
"(",
"self",
".",
"environment",
".",
"options",
")",
"options",
".",
"update",
"(",
"name",
"=",
"name",
",",
"id",
"=",
"id",
")",
"# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys",
"if",
"jinja2",
".",
"__version__",
">=",
"'2.9'",
":",
"kwargs",
"=",
"dict",
"(",
"(",
"k",
"[",
"4",
":",
"]",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"else",
":",
"kwargs",
"=",
"dict",
"(",
"(",
"k",
"[",
"2",
":",
"]",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
")",
"if",
"self",
".",
"_library",
"is",
"None",
":",
"self",
".",
"_library",
"=",
"self",
".",
"load_library",
"(",
")",
"id",
"=",
"kwargs",
".",
"get",
"(",
"'id'",
",",
"''",
")",
"library",
"=",
"self",
".",
"_library",
".",
"get",
"(",
"id",
",",
"{",
"}",
")",
"# apply options from a tag",
"library",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'library'",
",",
"{",
"}",
")",
")",
"# apply options from chartkick.json",
"kwargs",
".",
"update",
"(",
"library",
"=",
"library",
")",
"options",
".",
"update",
"(",
"kwargs",
")",
"return",
"CHART_HTML",
".",
"format",
"(",
"data",
"=",
"data",
",",
"options",
"=",
"json",
".",
"dumps",
"(",
"kwargs",
")",
",",
"*",
"*",
"options",
")"
]
| template chart support function | [
"template",
"chart",
"support",
"function"
]
| 3411f36a069560fe1ba218e0a35f68c413332f63 | https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/ext.py#L63-L88 | train |
mher/chartkick.py | chartkick/ext.py | ChartExtension.load_library | def load_library(self):
"loads configuration options"
try:
filename = self.environment.get_template('chartkick.json').filename
except TemplateNotFound:
return {}
else:
options = Options()
options.load(filename)
return options | python | def load_library(self):
"loads configuration options"
try:
filename = self.environment.get_template('chartkick.json').filename
except TemplateNotFound:
return {}
else:
options = Options()
options.load(filename)
return options | [
"def",
"load_library",
"(",
"self",
")",
":",
"try",
":",
"filename",
"=",
"self",
".",
"environment",
".",
"get_template",
"(",
"'chartkick.json'",
")",
".",
"filename",
"except",
"TemplateNotFound",
":",
"return",
"{",
"}",
"else",
":",
"options",
"=",
"Options",
"(",
")",
"options",
".",
"load",
"(",
"filename",
")",
"return",
"options"
]
| loads configuration options | [
"loads",
"configuration",
"options"
]
| 3411f36a069560fe1ba218e0a35f68c413332f63 | https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/ext.py#L94-L103 | train |
mher/chartkick.py | chartkick/__init__.py | js | def js():
"returns home directory of js"
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'js') | python | def js():
"returns home directory of js"
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'js') | [
"def",
"js",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'js'",
")"
]
| returns home directory of js | [
"returns",
"home",
"directory",
"of",
"js"
]
| 3411f36a069560fe1ba218e0a35f68c413332f63 | https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/__init__.py#L8-L10 | train |
mher/chartkick.py | chartkick/templatetags/chartkick.py | parse_options | def parse_options(source):
"""parses chart tag options"""
options = {}
tokens = [t.strip() for t in source.split('=')]
name = tokens[0]
for token in tokens[1:-1]:
value, next_name = token.rsplit(' ', 1)
options[name.strip()] = value
name = next_name
options[name.strip()] = tokens[-1].strip()
return options | python | def parse_options(source):
"""parses chart tag options"""
options = {}
tokens = [t.strip() for t in source.split('=')]
name = tokens[0]
for token in tokens[1:-1]:
value, next_name = token.rsplit(' ', 1)
options[name.strip()] = value
name = next_name
options[name.strip()] = tokens[-1].strip()
return options | [
"def",
"parse_options",
"(",
"source",
")",
":",
"options",
"=",
"{",
"}",
"tokens",
"=",
"[",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"source",
".",
"split",
"(",
"'='",
")",
"]",
"name",
"=",
"tokens",
"[",
"0",
"]",
"for",
"token",
"in",
"tokens",
"[",
"1",
":",
"-",
"1",
"]",
":",
"value",
",",
"next_name",
"=",
"token",
".",
"rsplit",
"(",
"' '",
",",
"1",
")",
"options",
"[",
"name",
".",
"strip",
"(",
")",
"]",
"=",
"value",
"name",
"=",
"next_name",
"options",
"[",
"name",
".",
"strip",
"(",
")",
"]",
"=",
"tokens",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"options"
]
| parses chart tag options | [
"parses",
"chart",
"tag",
"options"
]
| 3411f36a069560fe1ba218e0a35f68c413332f63 | https://github.com/mher/chartkick.py/blob/3411f36a069560fe1ba218e0a35f68c413332f63/chartkick/templatetags/chartkick.py#L91-L102 | train |
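A worked example of the option grammar above. Hedged: the templatetags module imports Django, so this assumes a Django-capable environment; the option values are illustrative.

from chartkick.templatetags.chartkick import parse_options

opts = parse_options('id=chart-1 height=400px')
# Split on '=' -> ['id', 'chart-1 height', '400px']; the middle token is
# rsplit on its last space, recovering the next option name, so:
assert opts == {'id': 'chart-1', 'height': '400px'}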
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.copy | def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame) | python | def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"RigidTransform",
"(",
"np",
".",
"copy",
"(",
"self",
".",
"rotation",
")",
",",
"np",
".",
"copy",
"(",
"self",
".",
"translation",
")",
",",
"self",
".",
"from_frame",
",",
"self",
".",
"to_frame",
")"
]
| Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform. | [
"Returns",
"a",
"copy",
"of",
"the",
"RigidTransform",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L81-L89 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform._check_valid_rotation | def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0') | python | def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0') | [
"def",
"_check_valid_rotation",
"(",
"self",
",",
"rotation",
")",
":",
"if",
"not",
"isinstance",
"(",
"rotation",
",",
"np",
".",
"ndarray",
")",
"or",
"not",
"np",
".",
"issubdtype",
"(",
"rotation",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"raise",
"ValueError",
"(",
"'Rotation must be specified as numeric numpy array'",
")",
"if",
"len",
"(",
"rotation",
".",
"shape",
")",
"!=",
"2",
"or",
"rotation",
".",
"shape",
"[",
"0",
"]",
"!=",
"3",
"or",
"rotation",
".",
"shape",
"[",
"1",
"]",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Rotation must be specified as a 3x3 ndarray'",
")",
"if",
"np",
".",
"abs",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"rotation",
")",
"-",
"1.0",
")",
">",
"1e-3",
":",
"raise",
"ValueError",
"(",
"'Illegal rotation. Must have determinant == 1.0'",
")"
]
| Checks that the given rotation matrix is valid. | [
"Checks",
"that",
"the",
"given",
"rotation",
"matrix",
"is",
"valid",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L91-L101 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform._check_valid_translation | def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray') | python | def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray') | [
"def",
"_check_valid_translation",
"(",
"self",
",",
"translation",
")",
":",
"if",
"not",
"isinstance",
"(",
"translation",
",",
"np",
".",
"ndarray",
")",
"or",
"not",
"np",
".",
"issubdtype",
"(",
"translation",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"raise",
"ValueError",
"(",
"'Translation must be specified as numeric numpy array'",
")",
"t",
"=",
"translation",
".",
"squeeze",
"(",
")",
"if",
"len",
"(",
"t",
".",
"shape",
")",
"!=",
"1",
"or",
"t",
".",
"shape",
"[",
"0",
"]",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray'",
")"
]
| Checks that the translation vector is valid. | [
"Checks",
"that",
"the",
"translation",
"vector",
"is",
"valid",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L103-L111 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.interpolate_with | def interpolate_with(self, other_tf, t):
"""Interpolate with another rigid transformation.
Parameters
----------
other_tf : :obj:`RigidTransform`
The transform to interpolate with.
t : float
The interpolation step in [0,1], where 0 favors this RigidTransform.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If t isn't in [0,1].
"""
if t < 0 or t > 1:
raise ValueError('Must interpolate between 0 and 1')
interp_translation = (1.0 - t) * self.translation + t * other_tf.translation
interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t)
interp_tf = RigidTransform(rotation=interp_rotation, translation=interp_translation,
from_frame = self.from_frame, to_frame = self.to_frame)
return interp_tf | python | def interpolate_with(self, other_tf, t):
"""Interpolate with another rigid transformation.
Parameters
----------
other_tf : :obj:`RigidTransform`
The transform to interpolate with.
t : float
The interpolation step in [0,1], where 0 favors this RigidTransform.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If t isn't in [0,1].
"""
if t < 0 or t > 1:
raise ValueError('Must interpolate between 0 and 1')
interp_translation = (1.0 - t) * self.translation + t * other_tf.translation
interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t)
interp_tf = RigidTransform(rotation=interp_rotation, translation=interp_translation,
from_frame = self.from_frame, to_frame = self.to_frame)
return interp_tf | [
"def",
"interpolate_with",
"(",
"self",
",",
"other_tf",
",",
"t",
")",
":",
"if",
"t",
"<",
"0",
"or",
"t",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Must interpolate between 0 and 1'",
")",
"interp_translation",
"=",
"(",
"1.0",
"-",
"t",
")",
"*",
"self",
".",
"translation",
"+",
"t",
"*",
"other_tf",
".",
"translation",
"interp_rotation",
"=",
"transformations",
".",
"quaternion_slerp",
"(",
"self",
".",
"quaternion",
",",
"other_tf",
".",
"quaternion",
",",
"t",
")",
"interp_tf",
"=",
"RigidTransform",
"(",
"rotation",
"=",
"interp_rotation",
",",
"translation",
"=",
"interp_translation",
",",
"from_frame",
"=",
"self",
".",
"from_frame",
",",
"to_frame",
"=",
"self",
".",
"to_frame",
")",
"return",
"interp_tf"
]
| Interpolate with another rigid transformation.
Parameters
----------
other_tf : :obj:`RigidTransform`
The transform to interpolate with.
t : float
The interpolation step in [0,1], where 0 favors this RigidTransform.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If t isn't in [0,1]. | [
"Interpolate",
"with",
"another",
"rigid",
"transformation",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L288-L316 | train |
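A pose-interpolation sketch for the method above. The frames and translations are illustrative, and the rotation argument is assumed to default to the identity in the constructor.

import numpy as np
from autolab_core import RigidTransform

a = RigidTransform(translation=np.array([0.0, 0.0, 0.0]),
                   from_frame='obj', to_frame='world')
b = RigidTransform(translation=np.array([1.0, 0.0, 0.0]),
                   from_frame='obj', to_frame='world')
mid = a.interpolate_with(b, 0.5)
# Translation is averaged to [0.5, 0, 0]; rotation is slerped between the
# two quaternions. A t outside [0, 1] raises ValueError.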
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.linear_trajectory_to | def linear_trajectory_to(self, target_tf, traj_len):
"""Creates a trajectory of poses linearly interpolated from this tf to a target tf.
Parameters
----------
target_tf : :obj:`RigidTransform`
The RigidTransform to interpolate to.
traj_len : int
The number of RigidTransforms in the returned trajectory.
Returns
-------
:obj:`list` of :obj:`RigidTransform`
A list of interpolated transforms from this transform to the target.
"""
if traj_len < 0:
raise ValueError('Traj len must be at least 0')
delta_t = 1.0 / (traj_len + 1)
t = 0.0
traj = []
while t < 1.0:
traj.append(self.interpolate_with(target_tf, t))
t += delta_t
traj.append(target_tf)
return traj | python | def linear_trajectory_to(self, target_tf, traj_len):
"""Creates a trajectory of poses linearly interpolated from this tf to a target tf.
Parameters
----------
target_tf : :obj:`RigidTransform`
The RigidTransform to interpolate to.
traj_len : int
The number of RigidTransforms in the returned trajectory.
Returns
-------
:obj:`list` of :obj:`RigidTransform`
A list of interpolated transforms from this transform to the target.
"""
if traj_len < 0:
raise ValueError('Traj len must be at least 0')
delta_t = 1.0 / (traj_len + 1)
t = 0.0
traj = []
while t < 1.0:
traj.append(self.interpolate_with(target_tf, t))
t += delta_t
traj.append(target_tf)
return traj | [
"def",
"linear_trajectory_to",
"(",
"self",
",",
"target_tf",
",",
"traj_len",
")",
":",
"if",
"traj_len",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Traj len must at least 0'",
")",
"delta_t",
"=",
"1.0",
"/",
"(",
"traj_len",
"+",
"1",
")",
"t",
"=",
"0.0",
"traj",
"=",
"[",
"]",
"while",
"t",
"<",
"1.0",
":",
"traj",
".",
"append",
"(",
"self",
".",
"interpolate_with",
"(",
"target_tf",
",",
"t",
")",
")",
"t",
"+=",
"delta_t",
"traj",
".",
"append",
"(",
"target_tf",
")",
"return",
"traj"
]
| Creates a trajectory of poses linearly interpolated from this tf to a target tf.
Parameters
----------
target_tf : :obj:`RigidTransform`
The RigidTransform to interpolate to.
traj_len : int
The number of RigidTransforms in the returned trajectory.
Returns
-------
:obj:`list` of :obj:`RigidTransform`
A list of interpolated transforms from this transform to the target. | [
"Creates",
"a",
"trajectory",
"of",
"poses",
"linearly",
"interpolated",
"from",
"this",
"tf",
"to",
"a",
"target",
"tf",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L318-L342 | train |
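A trajectory sketch for the sampler above. Note that the while loop emits the interior samples plus both endpoints, so the returned list is slightly longer than traj_len; the poses below are toy data.

import numpy as np
from autolab_core import RigidTransform

start = RigidTransform(translation=np.zeros(3),
                       from_frame='obj', to_frame='world')
goal = RigidTransform(translation=np.array([1.0, 0.0, 0.0]),
                      from_frame='obj', to_frame='world')
traj = start.linear_trajectory_to(goal, traj_len=3)
# delta_t = 0.25 here, so t visits 0, 0.25, 0.5, 0.75 and the target pose is
# appended, giving traj_len + 2 poses (modulo float accumulation in t).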
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.apply | def apply(self, points):
"""Applies the rigid transformation to a set of 3D objects.
Parameters
----------
points : :obj:`BagOfPoints`
A set of objects to transform. Could be any subclass of BagOfPoints.
Returns
-------
:obj:`BagOfPoints`
A transformed set of objects of the same type as the input.
Raises
------
ValueError
If the input is not a Bag of 3D points or if the points are not in
this transform's from_frame.
"""
if not isinstance(points, BagOfPoints):
raise ValueError('Rigid transformations can only be applied to bags of points')
if points.dim != 3:
raise ValueError('Rigid transformations can only be applied to 3-dimensional points')
if points.frame != self._from_frame:
raise ValueError('Cannot transform points in frame %s with rigid transformation from frame %s to frame %s' %(points.frame, self._from_frame, self._to_frame))
if isinstance(points, BagOfVectors):
# rotation only
x = points.data
x_tf = self.rotation.dot(x)
else:
# extract numpy data, homogenize, and transform
x = points.data
if len(x.shape) == 1:
x = x[:,np.newaxis]
x_homog = np.r_[x, np.ones([1, points.num_points])]
x_homog_tf = self.matrix.dot(x_homog)
x_tf = x_homog_tf[0:3,:]
# output in BagOfPoints format
if isinstance(points, PointCloud):
return PointCloud(x_tf, frame=self._to_frame)
elif isinstance(points, Point):
return Point(x_tf, frame=self._to_frame)
elif isinstance(points, Direction):
return Direction(x_tf, frame=self._to_frame)
elif isinstance(points, NormalCloud):
return NormalCloud(x_tf, frame=self._to_frame)
raise ValueError('Type %s not yet supported' %(type(points))) | python | def apply(self, points):
"""Applies the rigid transformation to a set of 3D objects.
Parameters
----------
points : :obj:`BagOfPoints`
A set of objects to transform. Could be any subclass of BagOfPoints.
Returns
-------
:obj:`BagOfPoints`
A transformed set of objects of the same type as the input.
Raises
------
ValueError
If the input is not a Bag of 3D points or if the points are not in
this transform's from_frame.
"""
if not isinstance(points, BagOfPoints):
raise ValueError('Rigid transformations can only be applied to bags of points')
if points.dim != 3:
raise ValueError('Rigid transformations can only be applied to 3-dimensional points')
if points.frame != self._from_frame:
raise ValueError('Cannot transform points in frame %s with rigid transformation from frame %s to frame %s' %(points.frame, self._from_frame, self._to_frame))
if isinstance(points, BagOfVectors):
# rotation only
x = points.data
x_tf = self.rotation.dot(x)
else:
# extract numpy data, homogenize, and transform
x = points.data
if len(x.shape) == 1:
x = x[:,np.newaxis]
x_homog = np.r_[x, np.ones([1, points.num_points])]
x_homog_tf = self.matrix.dot(x_homog)
x_tf = x_homog_tf[0:3,:]
# output in BagOfPoints format
if isinstance(points, PointCloud):
return PointCloud(x_tf, frame=self._to_frame)
elif isinstance(points, Point):
return Point(x_tf, frame=self._to_frame)
elif isinstance(points, Direction):
return Direction(x_tf, frame=self._to_frame)
elif isinstance(points, NormalCloud):
return NormalCloud(x_tf, frame=self._to_frame)
raise ValueError('Type %s not yet supported' %(type(points))) | [
"def",
"apply",
"(",
"self",
",",
"points",
")",
":",
"if",
"not",
"isinstance",
"(",
"points",
",",
"BagOfPoints",
")",
":",
"raise",
"ValueError",
"(",
"'Rigid transformations can only be applied to bags of points'",
")",
"if",
"points",
".",
"dim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Rigid transformations can only be applied to 3-dimensional points'",
")",
"if",
"points",
".",
"frame",
"!=",
"self",
".",
"_from_frame",
":",
"raise",
"ValueError",
"(",
"'Cannot transform points in frame %s with rigid transformation from frame %s to frame %s'",
"%",
"(",
"points",
".",
"frame",
",",
"self",
".",
"_from_frame",
",",
"self",
".",
"_to_frame",
")",
")",
"if",
"isinstance",
"(",
"points",
",",
"BagOfVectors",
")",
":",
"# rotation only",
"x",
"=",
"points",
".",
"data",
"x_tf",
"=",
"self",
".",
"rotation",
".",
"dot",
"(",
"x",
")",
"else",
":",
"# extract numpy data, homogenize, and transform",
"x",
"=",
"points",
".",
"data",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"1",
":",
"x",
"=",
"x",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"x_homog",
"=",
"np",
".",
"r_",
"[",
"x",
",",
"np",
".",
"ones",
"(",
"[",
"1",
",",
"points",
".",
"num_points",
"]",
")",
"]",
"x_homog_tf",
"=",
"self",
".",
"matrix",
".",
"dot",
"(",
"x_homog",
")",
"x_tf",
"=",
"x_homog_tf",
"[",
"0",
":",
"3",
",",
":",
"]",
"# output in BagOfPoints format",
"if",
"isinstance",
"(",
"points",
",",
"PointCloud",
")",
":",
"return",
"PointCloud",
"(",
"x_tf",
",",
"frame",
"=",
"self",
".",
"_to_frame",
")",
"elif",
"isinstance",
"(",
"points",
",",
"Point",
")",
":",
"return",
"Point",
"(",
"x_tf",
",",
"frame",
"=",
"self",
".",
"_to_frame",
")",
"elif",
"isinstance",
"(",
"points",
",",
"Direction",
")",
":",
"return",
"Direction",
"(",
"x_tf",
",",
"frame",
"=",
"self",
".",
"_to_frame",
")",
"elif",
"isinstance",
"(",
"points",
",",
"NormalCloud",
")",
":",
"return",
"NormalCloud",
"(",
"x_tf",
",",
"frame",
"=",
"self",
".",
"_to_frame",
")",
"raise",
"ValueError",
"(",
"'Type %s not yet supported'",
"%",
"(",
"type",
"(",
"points",
")",
")",
")"
]
| Applies the rigid transformation to a set of 3D objects.
Parameters
----------
points : :obj:`BagOfPoints`
A set of objects to transform. Could be any subclass of BagOfPoints.
Returns
-------
:obj:`BagOfPoints`
A transformed set of objects of the same type as the input.
Raises
------
ValueError
If the input is not a Bag of 3D points or if the points are not in
this transform's from_frame. | [
"Applies",
"the",
"rigid",
"transformation",
"to",
"a",
"set",
"of",
"3D",
"objects",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L344-L392 | train |
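An apply sketch for the method above: the point cloud's frame must match the transform's from_frame. The random 3 x N data is toy input.

import numpy as np
from autolab_core import RigidTransform, PointCloud

T_obj_world = RigidTransform(translation=np.array([0.0, 0.0, 1.0]),
                             from_frame='obj', to_frame='world')
pts = PointCloud(np.random.rand(3, 10), frame='obj')  # 3 x N, frame 'obj'
pts_world = T_obj_world.apply(pts)  # a PointCloud in frame 'world'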
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.dot | def dot(self, other_tf):
"""Compose this rigid transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`RigidTransform`
The other RigidTransform to compose with this one.
Returns
-------
:obj:`RigidTransform`
A RigidTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
"""
if other_tf.to_frame != self.from_frame:
raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
pose_tf = self.matrix.dot(other_tf.matrix)
rotation, translation = RigidTransform.rotation_and_translation_from_matrix(pose_tf)
if isinstance(other_tf, SimilarityTransform):
return SimilarityTransform(self.rotation, self.translation, scale=1.0,
from_frame=self.from_frame,
to_frame=self.to_frame) * other_tf
return RigidTransform(rotation, translation,
from_frame=other_tf.from_frame,
to_frame=self.to_frame) | python | def dot(self, other_tf):
"""Compose this rigid transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`RigidTransform`
The other RigidTransform to compose with this one.
Returns
-------
:obj:`RigidTransform`
A RigidTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
"""
if other_tf.to_frame != self.from_frame:
raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
pose_tf = self.matrix.dot(other_tf.matrix)
rotation, translation = RigidTransform.rotation_and_translation_from_matrix(pose_tf)
if isinstance(other_tf, SimilarityTransform):
return SimilarityTransform(self.rotation, self.translation, scale=1.0,
from_frame=self.from_frame,
to_frame=self.to_frame) * other_tf
return RigidTransform(rotation, translation,
from_frame=other_tf.from_frame,
to_frame=self.to_frame) | [
"def",
"dot",
"(",
"self",
",",
"other_tf",
")",
":",
"if",
"other_tf",
".",
"to_frame",
"!=",
"self",
".",
"from_frame",
":",
"raise",
"ValueError",
"(",
"'To frame of right hand side ({0}) must match from frame of left hand side ({1})'",
".",
"format",
"(",
"other_tf",
".",
"to_frame",
",",
"self",
".",
"from_frame",
")",
")",
"pose_tf",
"=",
"self",
".",
"matrix",
".",
"dot",
"(",
"other_tf",
".",
"matrix",
")",
"rotation",
",",
"translation",
"=",
"RigidTransform",
".",
"rotation_and_translation_from_matrix",
"(",
"pose_tf",
")",
"if",
"isinstance",
"(",
"other_tf",
",",
"SimilarityTransform",
")",
":",
"return",
"SimilarityTransform",
"(",
"self",
".",
"rotation",
",",
"self",
".",
"translation",
",",
"scale",
"=",
"1.0",
",",
"from_frame",
"=",
"self",
".",
"from_frame",
",",
"to_frame",
"=",
"self",
".",
"to_frame",
")",
"*",
"other_tf",
"return",
"RigidTransform",
"(",
"rotation",
",",
"translation",
",",
"from_frame",
"=",
"other_tf",
".",
"from_frame",
",",
"to_frame",
"=",
"self",
".",
"to_frame",
")"
]
| Compose this rigid transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`RigidTransform`
The other RigidTransform to compose with this one.
Returns
-------
:obj:`RigidTransform`
A RigidTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame. | [
"Compose",
"this",
"rigid",
"transform",
"with",
"another",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L394-L427 | train |
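A frame-checked composition sketch: T_b_c.dot(T_a_b) maps frame 'a' to frame 'c'. Frame names and translations are illustrative.

import numpy as np
from autolab_core import RigidTransform

T_a_b = RigidTransform(translation=np.array([1.0, 0.0, 0.0]),
                       from_frame='a', to_frame='b')
T_b_c = RigidTransform(translation=np.array([0.0, 1.0, 0.0]),
                       from_frame='b', to_frame='c')
T_a_c = T_b_c.dot(T_a_b)  # frames chain: from 'a' to 'c'
# Swapping the operands raises ValueError because the frames don't chain.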
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.inverse | def inverse(self):
"""Take the inverse of the rigid transform.
Returns
-------
:obj:`RigidTransform`
The inverse of this RigidTransform.
"""
inv_rotation = self.rotation.T
inv_translation = np.dot(-self.rotation.T, self.translation)
return RigidTransform(inv_rotation, inv_translation,
from_frame=self._to_frame,
to_frame=self._from_frame) | python | def inverse(self):
"""Take the inverse of the rigid transform.
Returns
-------
:obj:`RigidTransform`
The inverse of this RigidTransform.
"""
inv_rotation = self.rotation.T
inv_translation = np.dot(-self.rotation.T, self.translation)
return RigidTransform(inv_rotation, inv_translation,
from_frame=self._to_frame,
to_frame=self._from_frame) | [
"def",
"inverse",
"(",
"self",
")",
":",
"inv_rotation",
"=",
"self",
".",
"rotation",
".",
"T",
"inv_translation",
"=",
"np",
".",
"dot",
"(",
"-",
"self",
".",
"rotation",
".",
"T",
",",
"self",
".",
"translation",
")",
"return",
"RigidTransform",
"(",
"inv_rotation",
",",
"inv_translation",
",",
"from_frame",
"=",
"self",
".",
"_to_frame",
",",
"to_frame",
"=",
"self",
".",
"_from_frame",
")"
]
| Take the inverse of the rigid transform.
Returns
-------
:obj:`RigidTransform`
The inverse of this RigidTransform. | [
"Take",
"the",
"inverse",
"of",
"the",
"rigid",
"transform",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L456-L468 | train |
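An inverse sketch: composing a transform with its inverse yields the identity, with the frames swapped as documented above. The pose below is toy data.

import numpy as np
from autolab_core import RigidTransform

T = RigidTransform(translation=np.array([1.0, 2.0, 3.0]),
                   from_frame='obj', to_frame='world')
T_inv = T.inverse()   # maps 'world' back to 'obj'
I = T.dot(T_inv)      # from 'world' to 'world'
assert np.allclose(I.matrix, np.eye(4))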
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.save | def save(self, filename):
"""Save the RigidTransform to a file.
The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)
Parameters
----------
filename : :obj:`str`
The file to save the transform to.
Raises
------
ValueError
If filename's extension isn't .tf.
"""
file_root, file_ext = os.path.splitext(filename)
if file_ext.lower() != TF_EXTENSION:
raise ValueError('Extension %s not supported for RigidTransform. Must be stored with extension %s' %(file_ext, TF_EXTENSION))
f = open(filename, 'w')
f.write('%s\n' %(self._from_frame))
f.write('%s\n' %(self._to_frame))
f.write('%f %f %f\n' %(self._translation[0], self._translation[1], self._translation[2]))
f.write('%f %f %f\n' %(self._rotation[0, 0], self._rotation[0, 1], self._rotation[0, 2]))
f.write('%f %f %f\n' %(self._rotation[1, 0], self._rotation[1, 1], self._rotation[1, 2]))
f.write('%f %f %f\n' %(self._rotation[2, 0], self._rotation[2, 1], self._rotation[2, 2]))
f.close() | python
| Save the RigidTransform to a file.
The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)
Parameters
----------
filename : :obj:`str`
The file to save the transform to.
Raises
------
ValueError
If filename's extension isn't .tf.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L470-L502 | train |
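The six-line .tf layout that save() writes is easy to inspect by hand. A minimal sketch (assuming autolab_core and numpy are installed; the frame names and file path are illustrative):

import numpy as np
from autolab_core import RigidTransform

T = RigidTransform(rotation=np.eye(3), translation=np.array([0.1, 0.0, 0.2]),
                   from_frame='gripper', to_frame='world')
T.save('gripper_to_world.tf')   # extension must be .tf
# the file holds: from_frame, to_frame, translation, then 3 rotation rows
print(open('gripper_to_world.tf').read())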
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.as_frames | def as_frames(self, from_frame, to_frame='world'):
"""Return a shallow copy of this rigid transform with just the frames
changed.
Parameters
----------
from_frame : :obj:`str`
The new from_frame.
to_frame : :obj:`str`
The new to_frame.
Returns
-------
:obj:`RigidTransform`
The RigidTransform with new frames.
"""
return RigidTransform(self.rotation, self.translation, from_frame, to_frame) | python
| Return a shallow copy of this rigid transform with just the frames
changed.
Parameters
----------
from_frame : :obj:`str`
The new from_frame.
to_frame : :obj:`str`
The new to_frame.
Returns
-------
:obj:`RigidTransform`
The RigidTransform with new frames.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L504-L521 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.rotation_from_quaternion | def rotation_from_quaternion(q_wxyz):
"""Convert quaternion array to rotation matrix.
Parameters
----------
q_wxyz : :obj:`numpy.ndarray` of float
A quaternion in wxyz order.
Returns
-------
:obj:`numpy.ndarray` of float
A 3x3 rotation matrix made from the quaternion.
"""
q_xyzw = np.array([q_wxyz[1], q_wxyz[2], q_wxyz[3], q_wxyz[0]])
R = transformations.quaternion_matrix(q_xyzw)[:3,:3]
return R | python
| Convert quaternion array to rotation matrix.
Parameters
----------
q_wxyz : :obj:`numpy.ndarray` of float
A quaternion in wxyz order.
Returns
-------
:obj:`numpy.ndarray` of float
A 3x3 rotation matrix made from the quaternion.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L700-L715 | train |
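Note the wxyz convention: the scalar part comes first, and the helper reorders it to xyzw before calling transformations.quaternion_matrix. A small sketch (autolab_core and numpy assumed):

import numpy as np
from autolab_core import RigidTransform

# 90-degree rotation about z, scalar-first (wxyz) order
q_wxyz = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
R = RigidTransform.rotation_from_quaternion(q_wxyz)
print(np.round(R, 3))   # ~ [[0, -1, 0], [1, 0, 0], [0, 0, 1]]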
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.quaternion_from_axis_angle | def quaternion_from_axis_angle(v):
"""Convert axis-angle representation to a quaternion vector.
Parameters
----------
v : :obj:`numpy.ndarray` of float
An axis-angle representation.
Returns
-------
:obj:`numpy.ndarray` of float
A quaternion vector from the axis-angle vector.
"""
theta = np.linalg.norm(v)
if theta > 0:
v = v / np.linalg.norm(v)
ax, ay, az = v
qx = ax * np.sin(0.5 * theta)
qy = ay * np.sin(0.5 * theta)
qz = az * np.sin(0.5 * theta)
qw = np.cos(0.5 * theta)
q = np.array([qw, qx, qy, qz])
return q | python
| Convert axis-angle representation to a quaternion vector.
Parameters
----------
v : :obj:`numpy.ndarray` of float
An axis-angle representation.
Returns
-------
:obj:`numpy.ndarray` of float
A quaternion vector from the axis-angle vector.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L719-L741 | train |
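The axis-angle vector packs the angle into the vector's norm, so the two quaternion helpers round-trip naturally. A sketch under the same assumptions as above:

import numpy as np
from autolab_core import RigidTransform

v = (np.pi / 2) * np.array([0.0, 0.0, 1.0])        # axis * angle
q = RigidTransform.quaternion_from_axis_angle(v)   # wxyz order
R = RigidTransform.rotation_from_quaternion(q)     # back to a matrix
print(np.round(q, 3), np.round(R, 3))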
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.transform_from_dual_quaternion | def transform_from_dual_quaternion(dq, from_frame='unassigned', to_frame='world'):
"""Create a RigidTransform from a DualQuaternion.
Parameters
----------
dq : :obj:`DualQuaternion`
The DualQuaternion to transform.
from_frame : :obj:`str`
A name for the frame of reference on which this transform
operates.
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
Returns
-------
:obj:`RigidTransform`
The RigidTransform made from the DualQuaternion.
"""
quaternion = dq.qr
translation = 2 * dq.qd[1:]
return RigidTransform(rotation=quaternion, translation=translation, from_frame=from_frame, to_frame=to_frame) | python
| Create a RigidTransform from a DualQuaternion.
Parameters
----------
dq : :obj:`DualQuaternion`
The DualQuaternion to transform.
from_frame : :obj:`str`
A name for the frame of reference on which this transform
operates.
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
Returns
-------
:obj:`RigidTransform`
The RigidTransform made from the DualQuaternion.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L760-L783 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.rotation_and_translation_from_matrix | def rotation_and_translation_from_matrix(matrix):
"""Helper to convert 4x4 matrix to rotation matrix and translation vector.
Parameters
----------
matrix : :obj:`numpy.ndarray` of float
4x4 rigid transformation matrix to be converted.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
A 3x3 rotation matrix and a 3-entry translation vector.
Raises
------
ValueError
If the incoming matrix isn't a 4x4 ndarray.
"""
if not isinstance(matrix, np.ndarray) or \
matrix.shape[0] != 4 or matrix.shape[1] != 4:
raise ValueError('Matrix must be specified as a 4x4 ndarray')
rotation = matrix[:3,:3]
translation = matrix[:3,3]
return rotation, translation | python
| Helper to convert 4x4 matrix to rotation matrix and translation vector.
Parameters
----------
matrix : :obj:`numpy.ndarray` of float
4x4 rigid transformation matrix to be converted.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
A 3x3 rotation matrix and a 3-entry translation vector.
Raises
------
ValueError
If the incoming matrix isn't a 4x4 ndarray.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L786-L809 | train |
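This helper is the usual bridge from a homogeneous 4x4 matrix to the (R, t) pair the constructor expects. A sketch (frame names are hypothetical):

import numpy as np
from autolab_core import RigidTransform

H = np.eye(4)
H[:3, 3] = [1.0, 2.0, 3.0]   # translation lives in the last column
R, t = RigidTransform.rotation_and_translation_from_matrix(H)
T = RigidTransform(rotation=R, translation=t, from_frame='a', to_frame='b')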
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.rotation_from_axis_and_origin | def rotation_from_axis_and_origin(axis, origin, angle, to_frame='world'):
"""
Returns a rotation (as a RigidTransform) about an arbitrary axis through the point origin, using Rodrigues' formula
Parameters
----------
axis : :obj:`numpy.ndarray` of float
3x1 vector representing which axis we should be rotating about
origin : :obj:`numpy.ndarray` of float
3x1 vector representing where the rotation should be centered around
angle : float
how much to rotate (in radians)
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
"""
axis_hat = np.array([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]])
# Rodrigues Formula
R = RigidTransform(
np.eye(3) + np.sin(angle) * axis_hat + (1 - np.cos(angle)) * axis_hat.dot(axis_hat),
from_frame=to_frame,
to_frame=to_frame
)
return RigidTransform(translation=origin, from_frame=to_frame, to_frame=to_frame) \
.dot(R) \
.dot(RigidTransform(translation=-origin, from_frame=to_frame, to_frame=to_frame)) | python
| Returns a rotation (as a RigidTransform) about an arbitrary axis through the point origin, using Rodrigues' formula
Parameters
----------
axis : :obj:`numpy.ndarray` of float
3x1 vector representing which axis we should be rotating about
origin : :obj:`numpy.ndarray` of float
3x1 vector representing where the rotation should be centered around
angle : float
how much to rotate (in radians)
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L812-L840 | train |
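Because the result is translate(origin) * rotate * translate(-origin), points on the rotation axis must be fixed. A sketch checking that (autolab_core and numpy assumed):

import numpy as np
from autolab_core import RigidTransform

origin = np.array([1.0, 0.0, 0.0])
T = RigidTransform.rotation_from_axis_and_origin(
    axis=np.array([0.0, 0.0, 1.0]), origin=origin, angle=np.pi / 2)
# a point on the axis maps to itself
print(T.rotation.dot(origin) + T.translation)   # ~ [1, 0, 0]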
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.x_axis_rotation | def x_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the x axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
"""
R = np.array([[1, 0, 0,],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]])
return R | python
| Generates a 3x3 rotation matrix for a rotation of angle
theta about the x axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L843-L860 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.y_axis_rotation | def y_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
"""
R = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
return R | python
| Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L863-L880 | train |
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.z_axis_rotation | def z_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the z axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
"""
R = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
return R | python
| Generates a 3x3 rotation matrix for a rotation of angle
theta about the z axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L883-L900 | train |
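The three elementary rotations above compose into arbitrary Euler-angle rotations, and any product of them stays orthogonal. A sketch:

import numpy as np
from autolab_core import RigidTransform

# ZYX composition of the three elementary rotations
R = (RigidTransform.z_axis_rotation(0.1)
     .dot(RigidTransform.y_axis_rotation(0.2))
     .dot(RigidTransform.x_axis_rotation(0.3)))
assert np.allclose(R.T.dot(R), np.eye(3))   # still orthogonal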
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.random_rotation | def random_rotation():
"""Generates a random 3x3 rotation matrix with SVD.
Returns
-------
:obj:`numpy.ndarray` of float
A random 3x3 rotation matrix.
"""
rand_seed = np.random.rand(3, 3)
U, S, V = np.linalg.svd(rand_seed)
return U | python
| Generates a random 3x3 rotation matrix with SVD.
Returns
-------
:obj:`numpy.ndarray` of float
A random 3x3 rotation matrix.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L903-L913 | train |
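One caveat: the U factor of an SVD is orthogonal but not necessarily proper, so its determinant can be -1 (a reflection). Callers that need a strict rotation may want to flip a column, as in this sketch:

import numpy as np
from autolab_core import RigidTransform

R = RigidTransform.random_rotation()
if np.linalg.det(R) < 0:     # U may be a reflection
    R[:, 0] = -R[:, 0]       # flipping one column restores det = +1
assert np.isclose(np.linalg.det(R), 1.0)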
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.rotation_from_axes | def rotation_from_axes(x_axis, y_axis, z_axis):
"""Convert specification of axis in target frame to
a rotation matrix from source to target frame.
Parameters
----------
x_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's x-axis.
y_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's y-axis.
z_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's z-axis.
Returns
-------
:obj:`numpy.ndarray` of float
A 3x3 rotation matrix that transforms from a source frame to the
given target frame.
"""
return np.hstack((x_axis[:,np.newaxis], y_axis[:,np.newaxis], z_axis[:,np.newaxis])) | python
| Convert specification of axis in target frame to
a rotation matrix from source to target frame.
Parameters
----------
x_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's x-axis.
y_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's y-axis.
z_axis : :obj:`numpy.ndarray` of float
A normalized 3-vector for the target frame's z-axis.
Returns
-------
:obj:`numpy.ndarray` of float
A 3x3 rotation matrix that transforms from a source frame to the
given target frame.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L927-L948 | train |
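Since the target axes become the columns of R, the construction is easy to sanity-check. A sketch:

import numpy as np
from autolab_core import RigidTransform

x = np.array([0.0, 1.0, 0.0])
y = np.array([-1.0, 0.0, 0.0])
z = np.cross(x, y)   # completes a right-handed frame
R = RigidTransform.rotation_from_axes(x, y, z)
assert np.allclose(R[:, 0], x) and np.isclose(np.linalg.det(R), 1.0)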
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.interpolate | def interpolate(T0, T1, t):
"""Return an interpolation of two RigidTransforms.
Parameters
----------
T0 : :obj:`RigidTransform`
The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
The second RigidTransform to interpolate.
t : float
The interpolation step in [0,1]. 0 favors T0, 1 favors T1.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If the to_frame attributes of the two RigidTransforms are not identical.
"""
if T0.to_frame != T1.to_frame:
raise ValueError('Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'.format(T0.to_frame, T1.to_frame))
dq0 = T0.dual_quaternion
dq1 = T1.dual_quaternion
dqt = DualQuaternion.interpolate(dq0, dq1, t)
from_frame = "{0}_{1}_{2}".format(T0.from_frame, T1.from_frame, t)
return RigidTransform.transform_from_dual_quaternion(dqt, from_frame, T0.to_frame) | python
| Return an interpolation of two RigidTransforms.
Parameters
----------
T0 : :obj:`RigidTransform`
The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
The second RigidTransform to interpolate.
t : float
The interpolation step in [0,1]. 0 favors T0, 1 favors T1.
Returns
-------
:obj:`RigidTransform`
The interpolated RigidTransform.
Raises
------
ValueError
If the to_frame attributes of the two RigidTransforms are not identical.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L973-L1004 | train |
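interpolate() blends via dual quaternions and synthesizes a new from_frame name of the form '{from0}_{from1}_{t}'. A sketch (it is assumed here that the constructor defaults rotation and translation to identity and zero):

import numpy as np
from autolab_core import RigidTransform

T0 = RigidTransform(from_frame='a', to_frame='world')
T1 = RigidTransform(rotation=RigidTransform.z_axis_rotation(np.pi / 2),
                    translation=np.array([1.0, 0.0, 0.0]),
                    from_frame='b', to_frame='world')
T_half = RigidTransform.interpolate(T0, T1, 0.5)
print(T_half.from_frame, T_half.translation)   # 'a_b_0.5', halfway pose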
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | RigidTransform.load | def load(filename):
"""Load a RigidTransform from a file.
The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)
Parameters
----------
filename : :obj:`str`
The file to load the transform from.
Returns
-------
:obj:`RigidTransform`
The RigidTransform read from the file.
Raises
------
ValueError
If filename's extension isn't .tf.
"""
file_root, file_ext = os.path.splitext(filename)
if file_ext.lower() != TF_EXTENSION:
raise ValueError('Extension %s not supported for RigidTransform. Can only load extension %s' %(file_ext, TF_EXTENSION))
f = open(filename, 'r')
lines = list(f)
from_frame = lines[0][:-1]
to_frame = lines[1][:-1]
t = np.zeros(3)
t_tokens = lines[2][:-1].split()
t[0] = float(t_tokens[0])
t[1] = float(t_tokens[1])
t[2] = float(t_tokens[2])
R = np.zeros([3,3])
r_tokens = lines[3][:-1].split()
R[0, 0] = float(r_tokens[0])
R[0, 1] = float(r_tokens[1])
R[0, 2] = float(r_tokens[2])
r_tokens = lines[4][:-1].split()
R[1, 0] = float(r_tokens[0])
R[1, 1] = float(r_tokens[1])
R[1, 2] = float(r_tokens[2])
r_tokens = lines[5][:-1].split()
R[2, 0] = float(r_tokens[0])
R[2, 1] = float(r_tokens[1])
R[2, 2] = float(r_tokens[2])
f.close()
return RigidTransform(rotation=R, translation=t,
from_frame=from_frame,
to_frame=to_frame) | python
| Load a RigidTransform from a file.
The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)
Parameters
----------
filename : :obj:`str`
The file to load the transform from.
Returns
-------
:obj:`RigidTransform`
The RigidTransform read from the file.
Raises
------
ValueError
If filename's extension isn't .tf.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1007-L1066 | train |
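load() is the exact mirror of save() above, re-parsing the six-line .tf layout. A sketch that round-trips the file written in the earlier save() example (the path is illustrative and must exist):

from autolab_core import RigidTransform

T = RigidTransform.load('gripper_to_world.tf')
print(T.from_frame, T.to_frame)   # 'gripper', 'world'
print(T.rotation, T.translation)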
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | SimilarityTransform.dot | def dot(self, other_tf):
"""Compose this simliarity transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`SimilarityTransform`
The other SimilarityTransform to compose with this one.
Returns
-------
:obj:`SimilarityTransform`
A SimilarityTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
"""
if other_tf.to_frame != self.from_frame:
raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
if not isinstance(other_tf, RigidTransform):
raise ValueError('Can only compose with other RigidTransform classes')
other_scale = 1.0
if isinstance(other_tf, SimilarityTransform):
other_scale = other_tf.scale
rotation = self.rotation.dot(other_tf.rotation)
translation = self.translation + self.scale * self.rotation.dot(other_tf.translation)
scale = self.scale * other_scale
return SimilarityTransform(rotation, translation, scale,
from_frame=other_tf.from_frame,
to_frame=self.to_frame) | python
| Compose this similarity transform with another.
This transform is on the left-hand side of the composition.
Parameters
----------
other_tf : :obj:`SimilarityTransform`
The other SimilarityTransform to compose with this one.
Returns
-------
:obj:`SimilarityTransform`
A SimilarityTransform that represents the composition.
Raises
------
ValueError
If the to_frame of other_tf is not identical to this transform's
from_frame.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1187-L1222 | train |
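Scales compose multiplicatively, and a plain RigidTransform on the right contributes scale 1.0. A sketch that also exercises inverse() from the next row (SimilarityTransform's constructor keywords are assumed to mirror RigidTransform's, plus scale):

import numpy as np
from autolab_core import RigidTransform, SimilarityTransform

S = SimilarityTransform(rotation=np.eye(3), translation=np.zeros(3), scale=2.0,
                        from_frame='obj', to_frame='world')
T = RigidTransform(rotation=np.eye(3), translation=np.array([1.0, 0.0, 0.0]),
                   from_frame='grasp', to_frame='obj')
ST = S.dot(T)
print(ST.scale, ST.translation)   # 2.0, [2. 0. 0.]
I = S.dot(S.inverse())            # frames chain: obj -> world -> obj
assert np.isclose(I.scale, 1.0) and np.allclose(I.translation, 0.0)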
BerkeleyAutomation/autolab_core | autolab_core/rigid_transformations.py | SimilarityTransform.inverse | def inverse(self):
"""Take the inverse of the similarity transform.
Returns
-------
:obj:`SimilarityTransform`
The inverse of this SimilarityTransform.
"""
inv_rot = np.linalg.inv(self.rotation)
inv_scale = 1.0 / self.scale
inv_trans = -inv_scale * inv_rot.dot(self.translation)
return SimilarityTransform(inv_rot, inv_trans, inv_scale,
from_frame=self._to_frame,
to_frame=self._from_frame) | python
| Take the inverse of the similarity transform.
Returns
-------
:obj:`SimilarityTransform`
The inverse of this SimilarityTransform.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1224-L1237 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | BagOfPoints.save | def save(self, filename):
"""Saves the collection to a file.
Parameters
----------
filename : :obj:`str`
The file to save the collection to.
Raises
------
ValueError
If the file extension is not .npy or .npz.
"""
file_root, file_ext = os.path.splitext(filename)
if file_ext == '.npy':
np.save(filename, self._data)
elif file_ext == '.npz':
np.savez_compressed(filename, self._data)
else:
raise ValueError('Extension %s not supported for point saves.' %(file_ext)) | python
| Saves the collection to a file.
Parameters
----------
filename : :obj:`str`
The file to save the collection to.
Raises
------
ValueError
If the file extension is not .npy or .npz.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L112-L131 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | BagOfPoints.load_data | def load_data(filename):
"""Loads data from a file.
Parameters
----------
filename : :obj:`str`
The file to load the collection from.
Returns
-------
:obj:`numpy.ndarray` of float
The data read from the file.
Raises
------
ValueError
If the file extension is not .npy or .npz.
"""
file_root, file_ext = os.path.splitext(filename)
data = None
if file_ext == '.npy':
data = np.load(filename)
elif file_ext == '.npz':
data = np.load(filename)['arr_0']
else:
raise ValueError('Extension %s not supported for point reads' %(file_ext))
return data | python
| Loads data from a file.
Parameters
----------
filename : :obj:`str`
The file to load the collection from.
Returns
-------
:obj:`numpy.ndarray` of float
The data read from the file.
Raises
------
ValueError
If the file extension is not .npy or .npz.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L133-L159 | train |
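save() and load_data() round-trip the raw ndarray; the frame is not stored, so it must be re-supplied when reconstructing an object. A sketch using the PointCloud subclass (autolab_core and numpy assumed; load_data is inherited as a static method):

import numpy as np
from autolab_core import PointCloud

pc = PointCloud(np.random.rand(3, 100), frame='camera')
pc.save('cloud.npz')                       # .npy works too; .npz is compressed
data = PointCloud.load_data('cloud.npz')   # bare ndarray, no frame attached
print(data.shape)                          # (3, 100)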
BerkeleyAutomation/autolab_core | autolab_core/points.py | Point.open | def open(filename, frame='unspecified'):
"""Create a Point from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created point.
Returns
-------
:obj:`Point`
A point created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return Point(data, frame) | python
| Create a Point from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created point.
Returns
-------
:obj:`Point`
A point created from the data in the file.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L371-L388 | train |
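As with load_data(), the file stores only the coordinates, so open() takes the frame as an argument. A sketch:

import numpy as np
from autolab_core import Point

p = Point(np.array([0.1, 0.2, 0.3]), frame='world')
p.save('p.npy')
q = Point.open('p.npy', frame='world')   # frame must be re-specified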
BerkeleyAutomation/autolab_core | autolab_core/points.py | Direction._check_valid_data | def _check_valid_data(self, data):
"""Checks that the incoming data is a Nx1 ndarray.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or if the vector is not
normed.
"""
if len(data.shape) == 2 and data.shape[1] != 1:
raise ValueError('Can only initialize Direction from a single Nx1 array')
if np.abs(np.linalg.norm(data) - 1.0) > 1e-4:
raise ValueError('Direction data must have norm=1.0') | python
| Checks that the incoming data is an Nx1 ndarray.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or if the vector is not
normed.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L405-L422 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | Direction.orthogonal_basis | def orthogonal_basis(self):
"""Return an orthogonal basis to this direction.
Note
----
Only implemented in 3D.
Returns
-------
:obj:`tuple` of :obj:`Direction`
The pair of normalized Direction vectors that form a basis of
this direction's orthogonal complement.
Raises
------
NotImplementedError
If the vector is not 3D
"""
if self.dim == 3:
x_arr = np.array([-self.data[1], self.data[0], 0])
if np.linalg.norm(x_arr) == 0:
x_arr = np.array([self.data[2], 0, 0])
x_arr = x_arr / np.linalg.norm(x_arr)
y_arr = np.cross(self.data, x_arr)
return Direction(x_arr, frame=self.frame), Direction(y_arr, frame=self.frame)
raise NotImplementedError('Orthogonal basis only supported for 3 dimensions') | python
| Return an orthogonal basis to this direction.
Note
----
Only implemented in 3D.
Returns
-------
:obj:`tuple` of :obj:`Direction`
The pair of normalized Direction vectors that form a basis of
this direction's orthogonal complement.
Raises
------
NotImplementedError
If the vector is not 3D
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L424-L449 | train |
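The two returned Directions, together with the original, form an orthonormal triad (right-handed, since y is built as d cross x). A sketch (it is assumed here that Direction.data returns a flat 3-vector):

import numpy as np
from autolab_core import Direction

d = Direction(np.array([0.0, 0.0, 1.0]), frame='world')
u, v = d.orthogonal_basis()
assert np.isclose(u.data.dot(v.data), 0.0)             # mutually orthogonal
assert np.allclose(np.cross(u.data, v.data), d.data)   # right-handed with d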
BerkeleyAutomation/autolab_core | autolab_core/points.py | Direction.open | def open(filename, frame='unspecified'):
"""Create a Direction from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created Direction.
Returns
-------
:obj:`Direction`
A Direction created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return Direction(data, frame) | python
| Create a Direction from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created Direction.
Returns
-------
:obj:`Direction`
A Direction created from the data in the file.
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L452-L469 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | Plane3D.split_points | def split_points(self, point_cloud):
"""Split a point cloud into two along this plane.
Parameters
----------
point_cloud : :obj:`PointCloud`
The PointCloud to divide in two.
Returns
-------
:obj:`tuple` of :obj:`PointCloud`
Two new PointCloud objects. The first contains points above the
plane, and the second contains points below the plane.
Raises
------
ValueError
If the input is not a PointCloud.
"""
if not isinstance(point_cloud, PointCloud):
raise ValueError('Can only split point clouds')
# compute indices above and below
# signed distance (p - x0) . n of every point; parentheses make the
# comparisons bind before the elementwise '&'
above_plane = (point_cloud._data - np.tile(self._x0.data, [1, point_cloud.num_points])).T.dot(self._n) > 0
above_plane = (point_cloud.z_coords > 0) & above_plane
below_plane = (point_cloud._data - np.tile(self._x0.data, [1, point_cloud.num_points])).T.dot(self._n) <= 0
below_plane = (point_cloud.z_coords > 0) & below_plane
# split data
above_data = point_cloud.data[:, above_plane]
below_data = point_cloud.data[:, below_plane]
return PointCloud(above_data, point_cloud.frame), PointCloud(below_data, point_cloud.frame) | python
| Split a point cloud into two along this plane.
Parameters
----------
point_cloud : :obj:`PointCloud`
The PointCloud to divide in two.
Returns
-------
:obj:`tuple` of :obj:`PointCloud`
Two new PointCloud objects. The first contains points above the
plane, and the second contains points below the plane.
Raises
------
ValueError
If the input is not a PointCloud. | [
"Split",
"a",
"point",
"cloud",
"into",
"two",
"along",
"this",
"plane",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L498-L528 | train |
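A minimal usage sketch for split_points. The plane class itself is not shown in this row, so `plane` below stands for an assumed, already-constructed plane object with the _n and _x0 attributes used above; the random data is illustrative:

import numpy as np
from autolab_core.points import PointCloud

# 3xN point data, purely illustrative
pts = PointCloud(np.random.rand(3, 100), frame='camera')
above, below = plane.split_points(pts)  # 'plane' is an assumed plane object
# points with non-positive z are dropped, so the two halves need not sum to 100
print(above.num_points, below.num_points)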
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.mean | def mean(self):
"""Returns the average point in the cloud.
Returns
-------
:obj:`Point`
The mean point in the PointCloud.
"""
mean_point_data = np.mean(self._data, axis=1)
return Point(mean_point_data, self._frame) | python | def mean(self):
"""Returns the average point in the cloud.
Returns
-------
:obj:`Point`
The mean point in the PointCloud.
"""
mean_point_data = np.mean(self._data, axis=1)
return Point(mean_point_data, self._frame) | [
"def",
"mean",
"(",
"self",
")",
":",
"mean_point_data",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"_data",
",",
"axis",
"=",
"1",
")",
"return",
"Point",
"(",
"mean_point_data",
",",
"self",
".",
"_frame",
")"
]
| Returns the average point in the cloud.
Returns
-------
:obj:`Point`
The mean point in the PointCloud. | [
"Returns",
"the",
"average",
"point",
"in",
"the",
"cloud",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L587-L596 | train |
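A short sketch of mean, which reduces the 3xN data to a single Point in the same frame (the data here is illustrative):

import numpy as np
from autolab_core.points import PointCloud

pts = PointCloud(np.array([[0., 2.], [0., 2.], [0., 2.]]), frame='world')
center = pts.mean()  # Point with data [1., 1., 1.] in frame 'world'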
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.subsample | def subsample(self, rate, random=False):
"""Returns a subsampled version of the PointCloud.
Parameters
----------
rate : int
Only every rate-th element of the PointCloud is returned.
Returns
-------
:obj:`PointCloud`
A subsampled point cloud with N / rate total samples.
:obj:`numpy.ndarray`
Array of indices of the sampled points in the original cloud.
Raises
------
ValueError
If rate is not a positive integer.
"""
if not isinstance(rate, int) or rate < 1:
raise ValueError('Can only subsample with strictly positive integer rate')
indices = np.arange(self.num_points)
if random:
np.random.shuffle(indices)
subsample_inds = indices[::rate]
subsampled_data = self._data[:,subsample_inds]
return PointCloud(subsampled_data, self._frame), subsample_inds | python | def subsample(self, rate, random=False):
"""Returns a subsampled version of the PointCloud.
Parameters
----------
rate : int
Only every rate-th element of the PointCloud is returned.
Returns
-------
:obj:`PointCloud`
A subsampled point cloud with N / rate total samples.
:obj:`numpy.ndarray`
Array of indices of the sampled points in the original cloud.
Raises
------
ValueError
If rate is not a positive integer.
"""
if not isinstance(rate, int) or rate < 1:
raise ValueError('Can only subsample with strictly positive integer rate')
indices = np.arange(self.num_points)
if random:
np.random.shuffle(indices)
subsample_inds = indices[::rate]
subsampled_data = self._data[:,subsample_inds]
return PointCloud(subsampled_data, self._frame), subsample_inds | [
"def",
"subsample",
"(",
"self",
",",
"rate",
",",
"random",
"=",
"False",
")",
":",
"if",
"type",
"(",
"rate",
")",
"!=",
"int",
"and",
"rate",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Can only subsample with strictly positive integer rate'",
")",
"indices",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"num_points",
")",
"if",
"random",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"indices",
")",
"subsample_inds",
"=",
"indices",
"[",
":",
":",
"rate",
"]",
"subsampled_data",
"=",
"self",
".",
"_data",
"[",
":",
",",
"subsample_inds",
"]",
"return",
"PointCloud",
"(",
"subsampled_data",
",",
"self",
".",
"_frame",
")",
",",
"subsample_inds"
]
| Returns a subsampled version of the PointCloud.
Parameters
----------
rate : int
Only every rate-th element of the PointCloud is returned.
Returns
-------
:obj:`PointCloud`
A subsampled point cloud with N / rate total samples.
:obj:`numpy.ndarray`
Array of indices of the sampled points in the original cloud.
Raises
------
ValueError
If rate is not a positive integer. | [
"Returns",
"a",
"subsampled",
"version",
"of",
"the",
"PointCloud",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L598-L623 | train |
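A sketch of subsample; note that, unlike NormalCloud.subsample further below, this variant also returns the indices of the kept points (the data is illustrative):

import numpy as np
from autolab_core.points import PointCloud

pts = PointCloud(np.random.rand(3, 100), frame='world')
small, inds = pts.subsample(10)                   # every 10th point, 10 total
shuffled, inds = pts.subsample(10, random=True)   # shuffle indices first, then take every 10th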
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.box_mask | def box_mask(self, box):
"""Return a PointCloud containing only points within the given Box.
Parameters
----------
box : :obj:`Box`
A box whose boundaries are used to filter points.
Returns
-------
:obj:`PointCloud`
A filtered PointCloud whose points are all in the given box.
:obj:`numpy.ndarray`
Array of indices of the segmented points in the original cloud.
Raises
------
ValueError
If the input is not a box in the same frame as the PointCloud.
"""
if not isinstance(box, Box):
raise ValueError('Must provide Box object')
if box.frame != self.frame:
raise ValueError('Box must be in same frame as PointCloud')
all_points = self.data.T
cond1 = np.all(box.min_pt <= all_points, axis=1)
cond2 = np.all(all_points <= box.max_pt, axis=1)
valid_point_indices = np.where(np.logical_and(cond1, cond2))[0]
valid_points = all_points[valid_point_indices]
return PointCloud(valid_points.T, self.frame), valid_point_indices | python | def box_mask(self, box):
"""Return a PointCloud containing only points within the given Box.
Parameters
----------
box : :obj:`Box`
A box whose boundaries are used to filter points.
Returns
-------
:obj:`PointCloud`
A filtered PointCloud whose points are all in the given box.
:obj:`numpy.ndarray`
Array of indices of the segmented points in the original cloud.
Raises
------
ValueError
If the input is not a box in the same frame as the PointCloud.
"""
if not isinstance(box, Box):
raise ValueError('Must provide Box object')
if box.frame != self.frame:
raise ValueError('Box must be in same frame as PointCloud')
all_points = self.data.T
cond1 = np.all(box.min_pt <= all_points, axis=1)
cond2 = np.all(all_points <= box.max_pt, axis=1)
valid_point_indices = np.where(np.logical_and(cond1, cond2))[0]
valid_points = all_points[valid_point_indices]
return PointCloud(valid_points.T, self.frame), valid_point_indices | [
"def",
"box_mask",
"(",
"self",
",",
"box",
")",
":",
"if",
"not",
"isinstance",
"(",
"box",
",",
"Box",
")",
":",
"raise",
"ValueError",
"(",
"'Must provide Box object'",
")",
"if",
"box",
".",
"frame",
"!=",
"self",
".",
"frame",
":",
"raise",
"ValueError",
"(",
"'Box must be in same frame as PointCloud'",
")",
"all_points",
"=",
"self",
".",
"data",
".",
"T",
"cond1",
"=",
"np",
".",
"all",
"(",
"box",
".",
"min_pt",
"<=",
"all_points",
",",
"axis",
"=",
"1",
")",
"cond2",
"=",
"np",
".",
"all",
"(",
"all_points",
"<=",
"box",
".",
"max_pt",
",",
"axis",
"=",
"1",
")",
"valid_point_indices",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_and",
"(",
"cond1",
",",
"cond2",
")",
")",
"[",
"0",
"]",
"valid_points",
"=",
"all_points",
"[",
"valid_point_indices",
"]",
"return",
"PointCloud",
"(",
"valid_points",
".",
"T",
",",
"self",
".",
"frame",
")",
",",
"valid_point_indices"
]
| Return a PointCloud containing only points within the given Box.
Parameters
----------
box : :obj:`Box`
A box whose boundaries are used to filter points.
Returns
-------
:obj:`PointCloud`
A filtered PointCloud whose points are all in the given box.
:obj:`numpy.ndarray`
Array of indices of the segmented points in the original cloud.
Raises
------
ValueError
If the input is not a box in the same frame as the PointCloud. | [
"Return",
"a",
"PointCloud",
"containing",
"only",
"points",
"within",
"the",
"given",
"Box",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L625-L655 | train |
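A sketch of box_mask, assuming a Box can be built from min/max corners and a frame; the Box constructor and import path are not shown in these rows, so both are assumptions:

import numpy as np
from autolab_core.points import PointCloud
from autolab_core import Box  # assumed import path and Box(min_pt, max_pt, frame) signature

pts = PointCloud(np.random.rand(3, 1000), frame='world')
box = Box(np.zeros(3), 0.5 * np.ones(3), 'world')
inside, inds = pts.box_mask(box)  # keeps points with all coordinates in [0, 0.5]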
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.best_fit_plane | def best_fit_plane(self):
"""Fits a plane to the point cloud using least squares.
Returns
-------
:obj:`tuple` of :obj:`Direction`, :obj:`Point`
The unit normal of the fitted plane and a point on the plane.
"""
X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)]
y = self.z_coords
A = X.T.dot(X)
b = X.T.dot(y)
w = np.linalg.solve(A, b)  # solve the normal equations without forming an explicit inverse
n = np.array([w[0], w[1], -1])
n = n / np.linalg.norm(n)
n = Direction(n, self._frame)
x0 = self.mean()
return n, x0 | python | def best_fit_plane(self):
"""Fits a plane to the point cloud using least squares.
Returns
-------
:obj:`tuple` of :obj:`Direction`, :obj:`Point`
The unit normal of the fitted plane and a point on the plane.
"""
X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)]
y = self.z_coords
A = X.T.dot(X)
b = X.T.dot(y)
w = np.linalg.solve(A, b)  # solve the normal equations without forming an explicit inverse
n = np.array([w[0], w[1], -1])
n = n / np.linalg.norm(n)
n = Direction(n, self._frame)
x0 = self.mean()
return n, x0 | [
"def",
"best_fit_plane",
"(",
"self",
")",
":",
"X",
"=",
"np",
".",
"c_",
"[",
"self",
".",
"x_coords",
",",
"self",
".",
"y_coords",
",",
"np",
".",
"ones",
"(",
"self",
".",
"num_points",
")",
"]",
"y",
"=",
"self",
".",
"z_coords",
"A",
"=",
"X",
".",
"T",
".",
"dot",
"(",
"X",
")",
"b",
"=",
"X",
".",
"T",
".",
"dot",
"(",
"y",
")",
"w",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"A",
")",
".",
"dot",
"(",
"b",
")",
"n",
"=",
"np",
".",
"array",
"(",
"[",
"w",
"[",
"0",
"]",
",",
"w",
"[",
"1",
"]",
",",
"-",
"1",
"]",
")",
"n",
"=",
"n",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"n",
")",
"n",
"=",
"Direction",
"(",
"n",
",",
"self",
".",
"_frame",
")",
"x0",
"=",
"self",
".",
"mean",
"(",
")",
"return",
"n",
",",
"x0"
]
| Fits a plane to the point cloud using least squares.
Returns
-------
:obj:`tuple` of :obj:`Direction`, :obj:`Point`
The unit normal of the fitted plane and a point on the plane. | [
"Fits",
"a",
"plane",
"to",
"the",
"point",
"cloud",
"using",
"least",
"squares",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L657-L674 | train |
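A sketch of best_fit_plane on noisy samples of the plane z = 0. The fit solves the normal equations for z = w0*x + w1*y + w2, so it implicitly assumes the surface is not vertical:

import numpy as np
from autolab_core.points import PointCloud

xy = np.random.rand(2, 500)
z = 0.01 * np.random.randn(1, 500)   # noisy samples of z ~ 0
pts = PointCloud(np.vstack((xy, z)), frame='world')
n, x0 = pts.best_fit_plane()          # n close to +/-[0, 0, 1], x0 near the centroid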
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.remove_zero_points | def remove_zero_points(self):
"""Removes points with a zero in the z-axis.
Note
----
This returns nothing and updates the PointCloud in-place.
"""
points_of_interest = np.where(self.z_coords != 0.0)[0]
self._data = self.data[:, points_of_interest] | python | def remove_zero_points(self):
"""Removes points with a zero in the z-axis.
Note
----
This returns nothing and updates the PointCloud in-place.
"""
points_of_interest = np.where(self.z_coords != 0.0)[0]
self._data = self.data[:, points_of_interest] | [
"def",
"remove_zero_points",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"self",
".",
"z_coords",
"!=",
"0.0",
")",
"[",
"0",
"]",
"self",
".",
"_data",
"=",
"self",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]"
]
| Removes points whose z coordinate is zero.
Note
----
This returns nothing and updates the PointCloud in-place. | [
"Removes",
"points",
"with",
"a",
"zero",
"in",
"the",
"z",
"-",
"axis",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L687-L695 | train |
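A sketch of remove_zero_points, e.g. for depth-camera output where missing pixels back-project to z = 0; note the filtering is in-place:

import numpy as np
from autolab_core.points import PointCloud

data = np.random.rand(3, 5)
data[2, ::2] = 0.0                 # simulate missing depth readings
pts = PointCloud(data, frame='camera')
pts.remove_zero_points()           # in-place; returns None
print(pts.num_points)              # 2 points remain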
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.remove_infinite_points | def remove_infinite_points(self):
"""Removes infinite points.
Note
----
This returns nothing and updates the PointCloud in-place.
"""
points_of_interest = np.where(np.all(np.isfinite(self.data), axis=0))[0]
self._data = self.data[:, points_of_interest] | python | def remove_infinite_points(self):
"""Removes infinite points.
Note
----
This returns nothing and updates the PointCloud in-place.
"""
points_of_interest = np.where(np.all(np.isfinite(self.data), axis=0))[0]
self._data = self.data[:, points_of_interest] | [
"def",
"remove_infinite_points",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"self",
".",
"data",
")",
",",
"axis",
"=",
"0",
")",
")",
"[",
"0",
"]",
"self",
".",
"_data",
"=",
"self",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]"
]
| Removes infinite points.
Note
----
This returns nothing and updates the PointCloud in-place. | [
"Removes",
"infinite",
"points",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L697-L705 | train |
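The same pattern applies to remove_infinite_points, which drops any column containing NaN or +/-inf:

import numpy as np
from autolab_core.points import PointCloud

data = np.random.rand(3, 4)
data[0, 1] = np.inf
data[2, 3] = np.nan
pts = PointCloud(data, frame='camera')
pts.remove_infinite_points()       # in-place; 2 finite points remain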
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointCloud.open | def open(filename, frame='unspecified'):
"""Create a PointCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created PointCloud.
Returns
-------
:obj:`PointCloud`
A PointCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return PointCloud(data, frame) | python | def open(filename, frame='unspecified'):
"""Create a PointCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created PointCloud.
Returns
-------
:obj:`PointCloud`
A PointCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return PointCloud(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"BagOfPoints",
".",
"load_data",
"(",
"filename",
")",
"return",
"PointCloud",
"(",
"data",
",",
"frame",
")"
]
| Create a PointCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created PointCloud.
Returns
-------
:obj:`PointCloud`
A PointCloud created from the data in the file. | [
"Create",
"a",
"PointCloud",
"from",
"data",
"saved",
"in",
"a",
"file",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L817-L834 | train |
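A sketch of an open round trip. The on-disk format is delegated to BagOfPoints.load_data, whose supported extensions are not shown in these rows, so both the .npy extension and the save method below are assumptions:

import numpy as np
from autolab_core.points import PointCloud

pts = PointCloud(np.random.rand(3, 10), frame='world')
pts.save('cloud.npy')                         # assumes a matching BagOfPoints save method
loaded = PointCloud.open('cloud.npy', frame='world')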
BerkeleyAutomation/autolab_core | autolab_core/points.py | NormalCloud.subsample | def subsample(self, rate):
"""Returns a subsampled version of the NormalCloud.
Parameters
----------
rate : int
Only every rate-th element of the NormalCloud is returned.
Returns
-------
:obj:`NormalCloud`
A subsampled normal cloud with N / rate total samples.
Raises
------
ValueError
If rate is not a positive integer.
"""
if not isinstance(rate, int) or rate < 1:
raise ValueError('Can only subsample with strictly positive integer rate')
subsample_inds = np.arange(self.num_points)[::rate]
subsampled_data = self._data[:,subsample_inds]
return NormalCloud(subsampled_data, self._frame) | python | def subsample(self, rate):
"""Returns a subsampled version of the NormalCloud.
Parameters
----------
rate : int
Only every rate-th element of the NormalCloud is returned.
Returns
-------
:obj:`NormalCloud`
A subsampled normal cloud with N / rate total samples.
Raises
------
ValueError
If rate is not a positive integer.
"""
if not isinstance(rate, int) or rate < 1:
raise ValueError('Can only subsample with strictly positive integer rate')
subsample_inds = np.arange(self.num_points)[::rate]
subsampled_data = self._data[:,subsample_inds]
return NormalCloud(subsampled_data, self._frame) | [
"def",
"subsample",
"(",
"self",
",",
"rate",
")",
":",
"if",
"type",
"(",
"rate",
")",
"!=",
"int",
"and",
"rate",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Can only subsample with strictly positive integer rate'",
")",
"subsample_inds",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"num_points",
")",
"[",
":",
":",
"rate",
"]",
"subsampled_data",
"=",
"self",
".",
"_data",
"[",
":",
",",
"subsample_inds",
"]",
"return",
"NormalCloud",
"(",
"subsampled_data",
",",
"self",
".",
"_frame",
")"
]
| Returns a subsampled version of the NormalCloud.
Parameters
----------
rate : int
Only every rate-th element of the NormalCloud is returned.
Returns
-------
:obj:`NormalCloud`
A subsampled normal cloud with N / rate total samples.
Raises
------
ValueError
If rate is not a positive integer. | [
"Returns",
"a",
"subsampled",
"version",
"of",
"the",
"NormalCloud",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L897-L919 | train |
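NormalCloud.subsample mirrors the PointCloud version but has no random option and returns only the cloud:

import numpy as np
from autolab_core.points import NormalCloud

normals = np.random.randn(3, 100)
normals /= np.linalg.norm(normals, axis=0)   # unit-length normals
nc = NormalCloud(normals, frame='world')
small = nc.subsample(5)                      # every 5th normal, 20 total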
BerkeleyAutomation/autolab_core | autolab_core/points.py | NormalCloud.remove_zero_normals | def remove_zero_normals(self):
"""Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
self._data = self._data[:, points_of_interest] | python | def remove_zero_normals(self):
"""Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
self._data = self._data[:, points_of_interest] | [
"def",
"remove_zero_normals",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"_data",
",",
"axis",
"=",
"0",
")",
"!=",
"0.0",
")",
"[",
"0",
"]",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
"[",
":",
",",
"points_of_interest",
"]"
]
| Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place. | [
"Removes",
"normal",
"vectors",
"with",
"a",
"zero",
"magnitude",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L921-L929 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | NormalCloud.remove_nan_normals | def remove_nan_normals(self):
"""Removes normal vectors with nan magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0]
self._data = self._data[:, points_of_interest] | python | def remove_nan_normals(self):
"""Removes normal vectors with nan magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0]
self._data = self._data[:, points_of_interest] | [
"def",
"remove_nan_normals",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"_data",
",",
"axis",
"=",
"0",
")",
")",
")",
"[",
"0",
"]",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
"[",
":",
",",
"points_of_interest",
"]"
]
| Removes normal vectors with nan magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place. | [
"Removes",
"normal",
"vectors",
"with",
"nan",
"magnitude",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L931-L939 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | NormalCloud.open | def open(filename, frame='unspecified'):
"""Create a NormalCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created NormalCloud.
Returns
-------
:obj:`NormalCloud`
A NormalCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return NormalCloud(data, frame) | python | def open(filename, frame='unspecified'):
"""Create a NormalCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created NormalCloud.
Returns
-------
:obj:`NormalCloud`
A NormalCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return NormalCloud(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"BagOfPoints",
".",
"load_data",
"(",
"filename",
")",
"return",
"NormalCloud",
"(",
"data",
",",
"frame",
")"
]
| Create a NormalCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created NormalCloud.
Returns
-------
:obj:`NormalCloud`
A NormalCloud created from the data in the file. | [
"Create",
"a",
"NormalCloud",
"from",
"data",
"saved",
"in",
"a",
"file",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L942-L959 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | ImageCoords.open | def open(filename, frame='unspecified'):
"""Create an ImageCoords from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created ImageCoords.
Returns
-------
:obj:`ImageCoords`
An ImageCoords created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return ImageCoords(data, frame) | python | def open(filename, frame='unspecified'):
"""Create an ImageCoords from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created ImageCoords.
Returns
-------
:obj:`ImageCoords`
An ImageCoords created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return ImageCoords(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"BagOfPoints",
".",
"load_data",
"(",
"filename",
")",
"return",
"ImageCoords",
"(",
"data",
",",
"frame",
")"
]
| Create an ImageCoords from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created ImageCoords.
Returns
-------
:obj:`ImageCoords`
An ImageCoords created from the data in the file. | [
"Create",
"an",
"ImageCoords",
"from",
"data",
"saved",
"in",
"a",
"file",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1015-L1032 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | RgbCloud.open | def open(filename, frame='unspecified'):
"""Create a RgbCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created RgbCloud.
Returns
-------
:obj:`RgbCloud`
An RgbCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return RgbCloud(data, frame) | python | def open(filename, frame='unspecified'):
"""Create a RgbCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created RgbCloud.
Returns
-------
:obj:`RgbCloud`
An RgbCloud created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return RgbCloud(data, frame) | [
"def",
"open",
"(",
"filename",
",",
"frame",
"=",
"'unspecified'",
")",
":",
"data",
"=",
"BagOfPoints",
".",
"load_data",
"(",
"filename",
")",
"return",
"RgbCloud",
"(",
"data",
",",
"frame",
")"
]
| Create a RgbCloud from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created RgbCloud.
Returns
-------
:obj:`RgbCloud`
A RgdCloud created from the data in the file. | [
"Create",
"a",
"RgbCloud",
"from",
"data",
"saved",
"in",
"a",
"file",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1093-L1110 | train |
BerkeleyAutomation/autolab_core | autolab_core/points.py | PointNormalCloud.remove_zero_points | def remove_zero_points(self):
"""Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the PointNormalCloud in-place.
"""
points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) &
(np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) &
(np.isfinite(self.normal_cloud.data[0,:])))[0]
self.point_cloud._data = self.point_cloud.data[:, points_of_interest]
self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest] | python | def remove_zero_points(self):
"""Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the PointNormalCloud in-place.
"""
points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) &
(np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) &
(np.isfinite(self.normal_cloud.data[0,:])))[0]
self.point_cloud._data = self.point_cloud.data[:, points_of_interest]
self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest] | [
"def",
"remove_zero_points",
"(",
"self",
")",
":",
"points_of_interest",
"=",
"np",
".",
"where",
"(",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"point_cloud",
".",
"data",
",",
"axis",
"=",
"0",
")",
"!=",
"0.0",
")",
"&",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"normal_cloud",
".",
"data",
",",
"axis",
"=",
"0",
")",
"!=",
"0.0",
")",
"&",
"(",
"np",
".",
"isfinite",
"(",
"self",
".",
"normal_cloud",
".",
"data",
"[",
"0",
",",
":",
"]",
")",
")",
")",
"[",
"0",
"]",
"self",
".",
"point_cloud",
".",
"_data",
"=",
"self",
".",
"point_cloud",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]",
"self",
".",
"normal_cloud",
".",
"_data",
"=",
"self",
".",
"normal_cloud",
".",
"data",
"[",
":",
",",
"points_of_interest",
"]"
]
| Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the PointNormalCloud in-place. | [
"Remove",
"all",
"elements",
"where",
"the",
"norms",
"and",
"points",
"are",
"zero",
"."
]
| 8f3813f6401972868cc5e3981ba1b4382d4418d5 | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1201-L1212 | train |
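A sketch of the paired filter. The PointNormalCloud constructor is not shown in these rows, so the two-argument signature below is an assumption:

import numpy as np
from autolab_core.points import PointCloud, NormalCloud, PointNormalCloud

points = np.random.rand(3, 6)
normals = np.random.randn(3, 6)
normals[:, 0] = 0.0                # zero normal -> dropped
points[:, 1] = 0.0                 # zero point -> dropped
normals[0, 2] = np.nan             # non-finite normal -> dropped
pnc = PointNormalCloud(PointCloud(points, 'world'), NormalCloud(normals, 'world'))  # assumed signature
pnc.remove_zero_points()           # in-place; 3 valid pairs remain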