Dataset columns:

| column | type |
|---|---|
| repo | string (7-55 chars) |
| path | string (4-127 chars) |
| func_name | string (1-88 chars) |
| original_string | string (75-19.8k chars) |
| language | string (1 class) |
| code | string (75-19.8k chars) |
| code_tokens | sequence |
| docstring | string (3-17.3k chars) |
| docstring_tokens | sequence |
| sha | string (40 chars) |
| url | string (87-242 chars) |
| partition | string (1 class) |

Each entry below lists `repo | path | func_name`, the function source, and `sha | url | partition`.
rfk/playitagainsam | playitagainsam/util.py | forkexec_pty

```python
def forkexec_pty(argv, env=None, size=None):
    """Fork a child process attached to a pty."""
    child_pid, child_fd = pty.fork()
    if child_pid == 0:
        os.closerange(3, MAXFD)
        environ = os.environ.copy()
        if env is not None:
            environ.update(env)
        os.execve(argv[0], argv, environ)
    if size is None:
        try:
            size = get_terminal_size(1)
        except Exception:
            size = (80, 24)
    set_terminal_size(child_fd, size)
    return child_pid, child_fd
```
"""Fork a child process attached to a pty."""
child_pid, child_fd = pty.fork()
if child_pid == 0:
os.closerange(3, MAXFD)
environ = os.environ.copy()
if env is not None:
environ.update(env)
os.execve(argv[0], argv, environ)
if size is None:
try:
size = get_terminal_size(1)
except Exception:
size = (80, 24)
set_terminal_size(child_fd, size)
return child_pid, child_fd | [
"def",
"forkexec_pty",
"(",
"argv",
",",
"env",
"=",
"None",
",",
"size",
"=",
"None",
")",
":",
"child_pid",
",",
"child_fd",
"=",
"pty",
".",
"fork",
"(",
")",
"if",
"child_pid",
"==",
"0",
":",
"os",
".",
"closerange",
"(",
"3",
",",
"MAXFD",
")",
"environ",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"if",
"env",
"is",
"not",
"None",
":",
"environ",
".",
"update",
"(",
"env",
")",
"os",
".",
"execve",
"(",
"argv",
"[",
"0",
"]",
",",
"argv",
",",
"environ",
")",
"if",
"size",
"is",
"None",
":",
"try",
":",
"size",
"=",
"get_terminal_size",
"(",
"1",
")",
"except",
"Exception",
":",
"size",
"=",
"(",
"80",
",",
"24",
")",
"set_terminal_size",
"(",
"child_fd",
",",
"size",
")",
"return",
"child_pid",
",",
"child_fd"
] | Fork a child process attached to a pty. | [
"Fork",
"a",
"child",
"process",
"attached",
"to",
"a",
"pty",
"."
897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L87-L102 | train
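A minimal usage sketch for `forkexec_pty`, assuming it is imported from the `playitagainsam/util.py` module shown above; the command, environment variable, and terminal size are placeholders chosen for illustration.

```python
# Illustrative only: run a command attached to a fresh pty and read its output.
import os

from playitagainsam.util import forkexec_pty

child_pid, child_fd = forkexec_pty(
    ["/bin/echo", "hello pty"],    # argv[0] is used as the executable path
    env={"EXAMPLE_VAR": "1"},      # merged into a copy of os.environ in the child
    size=(80, 24),                 # skip the get_terminal_size() probe in the parent
)

output = os.read(child_fd, 1024)   # whatever the child wrote to the pty
os.waitpid(child_pid, 0)           # reap the child
print(output)
```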
rfk/playitagainsam | playitagainsam/util.py | get_ancestor_processes

```python
def get_ancestor_processes():
    """Get a list of the executables of all ancestor processes."""
    if not _ANCESTOR_PROCESSES and psutil is not None:
        proc = psutil.Process(os.getpid())
        while proc.parent() is not None:
            try:
                _ANCESTOR_PROCESSES.append(proc.parent().exe())
                proc = proc.parent()
            except psutil.Error:
                break
    return _ANCESTOR_PROCESSES
```
"""Get a list of the executables of all ancestor processes."""
if not _ANCESTOR_PROCESSES and psutil is not None:
proc = psutil.Process(os.getpid())
while proc.parent() is not None:
try:
_ANCESTOR_PROCESSES.append(proc.parent().exe())
proc = proc.parent()
except psutil.Error:
break
return _ANCESTOR_PROCESSES | [
"def",
"get_ancestor_processes",
"(",
")",
":",
"if",
"not",
"_ANCESTOR_PROCESSES",
"and",
"psutil",
"is",
"not",
"None",
":",
"proc",
"=",
"psutil",
".",
"Process",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"while",
"proc",
".",
"parent",
"(",
")",
"is",
"not",
"None",
":",
"try",
":",
"_ANCESTOR_PROCESSES",
".",
"append",
"(",
"proc",
".",
"parent",
"(",
")",
".",
"exe",
"(",
")",
")",
"proc",
"=",
"proc",
".",
"parent",
"(",
")",
"except",
"psutil",
".",
"Error",
":",
"break",
"return",
"_ANCESTOR_PROCESSES"
] | Get a list of the executables of all ancestor processes. | [
"Get",
"a",
"list",
"of",
"the",
"executables",
"of",
"all",
"ancestor",
"processes",
"."
897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L121-L131 | train
rfk/playitagainsam | playitagainsam/util.py | get_default_shell

```python
def get_default_shell(environ=None, fallback=_UNSPECIFIED):
    """Get the user's default shell program."""
    if environ is None:
        environ = os.environ
    # If the option is specified in the environment, respect it.
    if "PIAS_OPT_SHELL" in environ:
        return environ["PIAS_OPT_SHELL"]
    # Find all candidate shell programs.
    shells = []
    for filename in (environ.get("SHELL"), "bash", "sh"):
        if filename is not None:
            filepath = find_executable(filename, environ)
            if filepath is not None:
                shells.append(filepath)
    # If one of them is an ancestor process, use that.
    for ancestor in get_ancestor_processes():
        if ancestor in shells:
            return ancestor
    # Otherwise use the first option that we found.
    for shell in shells:
        return shell
    # Use an explicit fallback option if given.
    if fallback is not _UNSPECIFIED:
        return fallback
    raise ValueError("Could not find a shell")
```
"""Get the user's default shell program."""
if environ is None:
environ = os.environ
# If the option is specified in the environment, respect it.
if "PIAS_OPT_SHELL" in environ:
return environ["PIAS_OPT_SHELL"]
# Find all candiate shell programs.
shells = []
for filename in (environ.get("SHELL"), "bash", "sh"):
if filename is not None:
filepath = find_executable(filename, environ)
if filepath is not None:
shells.append(filepath)
# If one of them is an ancestor process, use that.
for ancestor in get_ancestor_processes():
if ancestor in shells:
return ancestor
# Otherwise use the first option that we found.
for shell in shells:
return shell
# Use an explicit fallback option if given.
if fallback is not _UNSPECIFIED:
return fallback
raise ValueError("Could not find a shell") | [
"def",
"get_default_shell",
"(",
"environ",
"=",
"None",
",",
"fallback",
"=",
"_UNSPECIFIED",
")",
":",
"if",
"environ",
"is",
"None",
":",
"environ",
"=",
"os",
".",
"environ",
"# If the option is specified in the environment, respect it.",
"if",
"\"PIAS_OPT_SHELL\"",
"in",
"environ",
":",
"return",
"environ",
"[",
"\"PIAS_OPT_SHELL\"",
"]",
"# Find all candiate shell programs.",
"shells",
"=",
"[",
"]",
"for",
"filename",
"in",
"(",
"environ",
".",
"get",
"(",
"\"SHELL\"",
")",
",",
"\"bash\"",
",",
"\"sh\"",
")",
":",
"if",
"filename",
"is",
"not",
"None",
":",
"filepath",
"=",
"find_executable",
"(",
"filename",
",",
"environ",
")",
"if",
"filepath",
"is",
"not",
"None",
":",
"shells",
".",
"append",
"(",
"filepath",
")",
"# If one of them is an ancestor process, use that.",
"for",
"ancestor",
"in",
"get_ancestor_processes",
"(",
")",
":",
"if",
"ancestor",
"in",
"shells",
":",
"return",
"ancestor",
"# Otherwise use the first option that we found.",
"for",
"shell",
"in",
"shells",
":",
"return",
"shell",
"# Use an explicit fallback option if given.",
"if",
"fallback",
"is",
"not",
"_UNSPECIFIED",
":",
"return",
"fallback",
"raise",
"ValueError",
"(",
"\"Could not find a shell\"",
")"
] | Get the user's default shell program. | [
"Get",
"the",
"user",
"s",
"default",
"shell",
"program",
"."
897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L134-L158 | train
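A short illustration of the lookup order, using hand-built environment dicts; the paths are invented for the example.

```python
# Illustrative only: exercise the lookup order with synthetic environments.
from playitagainsam.util import get_default_shell

# 1. An explicit PIAS_OPT_SHELL entry wins over everything else.
print(get_default_shell(environ={"PIAS_OPT_SHELL": "/usr/local/bin/fish"}))

# 2. Otherwise $SHELL, "bash" and "sh" are located with find_executable(),
#    preferring a candidate that is also an ancestor of the current process.
print(get_default_shell(environ={"SHELL": "/bin/zsh", "PATH": "/bin:/usr/bin"}))

# 3. With nothing on the PATH, the fallback keyword avoids the ValueError.
print(get_default_shell(environ={"PATH": ""}, fallback=None))
```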
rfk/playitagainsam | playitagainsam/util.py | get_default_terminal

```python
def get_default_terminal(environ=None, fallback=_UNSPECIFIED):
    """Get the user's default terminal program."""
    if environ is None:
        environ = os.environ
    # If the option is specified in the environment, respect it.
    if "PIAS_OPT_TERMINAL" in environ:
        return environ["PIAS_OPT_TERMINAL"]
    # Find all candidate terminal programs.
    terminals = []
    colorterm = environ.get("COLORTERM")
    for filename in (colorterm, "gnome-terminal", "konsole", "xterm"):
        if filename is not None:
            filepath = find_executable(filename, environ)
            if filepath is not None:
                terminals.append(filepath)
    # If one of them is an ancestor process, use that.
    for ancestor in get_ancestor_processes():
        if ancestor in terminals:
            return ancestor
    # Otherwise use the first option that we found.
    for term in terminals:
        return term
    # Use an explicit fallback option if given.
    if fallback is not _UNSPECIFIED:
        return fallback
    raise ValueError("Could not find a terminal")
```
"""Get the user's default terminal program."""
if environ is None:
environ = os.environ
# If the option is specified in the environment, respect it.
if "PIAS_OPT_TERMINAL" in environ:
return environ["PIAS_OPT_TERMINAL"]
# Find all candiate terminal programs.
terminals = []
colorterm = environ.get("COLORTERM")
for filename in (colorterm, "gnome-terminal", "konsole", "xterm"):
if filename is not None:
filepath = find_executable(filename, environ)
if filepath is not None:
terminals.append(filepath)
# If one of them is an ancestor process, use that.
for ancestor in get_ancestor_processes():
if ancestor in terminals:
return ancestor
# Otherwise use the first option that we found.
for term in terminals:
return term
# Use an explicit fallback option if given.
if fallback is not _UNSPECIFIED:
return fallback
raise ValueError("Could not find a terminal") | [
"def",
"get_default_terminal",
"(",
"environ",
"=",
"None",
",",
"fallback",
"=",
"_UNSPECIFIED",
")",
":",
"if",
"environ",
"is",
"None",
":",
"environ",
"=",
"os",
".",
"environ",
"# If the option is specified in the environment, respect it.",
"if",
"\"PIAS_OPT_TERMINAL\"",
"in",
"environ",
":",
"return",
"environ",
"[",
"\"PIAS_OPT_TERMINAL\"",
"]",
"# Find all candiate terminal programs.",
"terminals",
"=",
"[",
"]",
"colorterm",
"=",
"environ",
".",
"get",
"(",
"\"COLORTERM\"",
")",
"for",
"filename",
"in",
"(",
"colorterm",
",",
"\"gnome-terminal\"",
",",
"\"konsole\"",
",",
"\"xterm\"",
")",
":",
"if",
"filename",
"is",
"not",
"None",
":",
"filepath",
"=",
"find_executable",
"(",
"filename",
",",
"environ",
")",
"if",
"filepath",
"is",
"not",
"None",
":",
"terminals",
".",
"append",
"(",
"filepath",
")",
"# If one of them is an ancestor process, use that.",
"for",
"ancestor",
"in",
"get_ancestor_processes",
"(",
")",
":",
"if",
"ancestor",
"in",
"terminals",
":",
"return",
"ancestor",
"# Otherwise use the first option that we found.",
"for",
"term",
"in",
"terminals",
":",
"return",
"term",
"# Use an explicit fallback option if given.",
"if",
"fallback",
"is",
"not",
"_UNSPECIFIED",
":",
"return",
"fallback",
"raise",
"ValueError",
"(",
"\"Could not find a terminal\"",
")"
] | Get the user's default terminal program. | [
"Get",
"the",
"user",
"s",
"default",
"terminal",
"program",
"."
897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L161-L186 | train
rfk/playitagainsam | playitagainsam/util.py | get_pias_script

```python
def get_pias_script(environ=None):
    """Get the path to the playitagainsam command-line script."""
    if os.path.basename(sys.argv[0]) == "pias":
        return sys.argv[0]
    filepath = find_executable("pias", environ)
    if filepath is not None:
        return filepath
    filepath = os.path.join(os.path.dirname(__file__), "__main__.py")
    # XXX TODO: check if executable
    if os.path.exists(filepath):
        return filepath
    raise RuntimeError("Could not locate the pias script.")
```
"""Get the path to the playitagainsam command-line script."""
if os.path.basename(sys.argv[0]) == "pias":
return sys.argv[0]
filepath = find_executable("pias", environ)
if filepath is not None:
return filepath
filepath = os.path.join(os.path.dirname(__file__), "__main__.py")
# XXX TODO: check if executable
if os.path.exists(filepath):
return filepath
raise RuntimeError("Could not locate the pias script.") | [
"def",
"get_pias_script",
"(",
"environ",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"==",
"\"pias\"",
":",
"return",
"sys",
".",
"argv",
"[",
"0",
"]",
"filepath",
"=",
"find_executable",
"(",
"\"pias\"",
",",
"environ",
")",
"if",
"filepath",
"is",
"not",
"None",
":",
"return",
"filepath",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"__main__.py\"",
")",
"# XXX TODO: check if executable",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"return",
"filepath",
"raise",
"RuntimeError",
"(",
"\"Could not locate the pias script.\"",
")"
] | Get the path to the playitagainsam command-line script. | [
"Get",
"the",
"path",
"to",
"the",
"playitagainsam",
"command",
"-",
"line",
"script",
"."
897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L189-L200 | train
ponty/eagexp | eagexp/airwires.py | airwires

```python
def airwires(board, showgui=0):
    'search for airwires in eagle board'
    board = Path(board).expand().abspath()
    file_out = tempfile.NamedTemporaryFile(suffix='.txt', delete=0)
    file_out.close()
    ulp = ulp_templ.replace('FILE_NAME', file_out.name)
    file_ulp = tempfile.NamedTemporaryFile(suffix='.ulp', delete=0)
    file_ulp.write(ulp.encode('utf-8'))
    file_ulp.close()
    commands = [
        'run ' + file_ulp.name,
        'quit',
    ]
    command_eagle(board, commands=commands, showgui=showgui)
    n = int(Path(file_out.name).text())
    Path(file_out.name).remove()
    Path(file_ulp.name).remove()
    return n
```
1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/airwires.py#L27-L51 | train
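A usage sketch, assuming a working EAGLE installation and a board file you supply (the path below is a placeholder):

```python
# Illustrative only: count unrouted connections (airwires) in an EAGLE board.
from eagexp.airwires import airwires

n = airwires('~/projects/example.brd', showgui=0)
if n:
    print('board still has %d airwires to route' % n)
else:
    print('board is fully routed')
```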
SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite._initialize

```python
def _initialize(self, con):
    """Set up tables in SQL"""
    if self.initialized:
        return
    SQLite3Database()._initialize(con)  # ASE db initialization
    cur = con.execute(
        'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
    if cur.fetchone()[0] == 0:  # no reaction table
        for init_command in init_commands:
            con.execute(init_command)  # Create tables
        con.commit()
    self.initialized = True
```
"""Set up tables in SQL"""
if self.initialized:
return
SQLite3Database()._initialize(con) # ASE db initialization
cur = con.execute(
'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
if cur.fetchone()[0] == 0: # no reaction table
for init_command in init_commands:
con.execute(init_command) # Create tables
con.commit()
self.initialized = True | [
"def",
"_initialize",
"(",
"self",
",",
"con",
")",
":",
"if",
"self",
".",
"initialized",
":",
"return",
"SQLite3Database",
"(",
")",
".",
"_initialize",
"(",
"con",
")",
"# ASE db initialization",
"cur",
"=",
"con",
".",
"execute",
"(",
"'SELECT COUNT(*) FROM sqlite_master WHERE name=\"reaction\"'",
")",
"if",
"cur",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"==",
"0",
":",
"# no reaction table",
"for",
"init_command",
"in",
"init_commands",
":",
"con",
".",
"execute",
"(",
"init_command",
")",
"# Create tables",
"con",
".",
"commit",
"(",
")",
"self",
".",
"initialized",
"=",
"True"
] | Set up tables in SQL | [
"Set",
"up",
"tables",
"in",
"SQL"
324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L118-L133 | train
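The guard above (probe `sqlite_master`, create the tables only when missing, remember the result) can be reproduced with the standard library alone; this standalone sketch uses a made-up one-table schema, not CatHub's real `init_commands`:

```python
# Standalone illustration of the same create-once guard, with a placeholder schema.
import sqlite3

init_commands = [
    'CREATE TABLE reaction (id INTEGER PRIMARY KEY AUTOINCREMENT, reaction_energy REAL)',
]

con = sqlite3.connect(':memory:')
cur = con.execute('SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"')
if cur.fetchone()[0] == 0:        # no reaction table yet
    for init_command in init_commands:
        con.execute(init_command)
    con.commit()
```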
SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite.write_publication

```python
def write_publication(self, values):
    """
    Write publication info to db

    Parameters
    ----------
    values: dict with entries
        {'pub_id': str (short name for publication),
         'authors': list of str ()
         'journal': str,
         'volume': str,
         'number': str,
         'pages': 'str'
         'year': int,
         'publisher': str,
         'doi': str,
         'tags': list of str}
    """
    con = self.connection or self._connect()
    self._initialize(con)
    cur = con.cursor()

    values = (values['pub_id'],
              values['title'],
              json.dumps(values['authors']),
              values['journal'],
              values['volume'],
              values['number'],
              values['pages'],
              values['year'],
              values['publisher'],
              values['doi'],
              json.dumps(values['tags']))

    q = self.default + ',' + ', '.join('?' * len(values))
    cur.execute('INSERT OR IGNORE INTO publication VALUES ({})'.format(q),
                values)
    pid = self.get_last_id(cur, table='publication')

    if self.connection is None:
        con.commit()
        con.close()

    return pid
```
"""
Write publication info to db
Parameters
----------
values: dict with entries
{'pub_id': str (short name for publication),
'authors': list of str ()
'journal': str,
'volume': str,
'number': str,
'pages': 'str'
'year': int,
'publisher': str,
'doi': str,
'tags': list of str}
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
values = (values['pub_id'],
values['title'],
json.dumps(values['authors']),
values['journal'],
values['volume'],
values['number'],
values['pages'],
values['year'],
values['publisher'],
values['doi'],
json.dumps(values['tags']))
q = self.default + ',' + ', '.join('?' * len(values))
cur.execute('INSERT OR IGNORE INTO publication VALUES ({})'.format(q),
values)
pid = self.get_last_id(cur, table='publication')
if self.connection is None:
con.commit()
con.close()
return pid | [
"def",
"write_publication",
"(",
"self",
",",
"values",
")",
":",
"con",
"=",
"self",
".",
"connection",
"or",
"self",
".",
"_connect",
"(",
")",
"self",
".",
"_initialize",
"(",
"con",
")",
"cur",
"=",
"con",
".",
"cursor",
"(",
")",
"values",
"=",
"(",
"values",
"[",
"'pub_id'",
"]",
",",
"values",
"[",
"'title'",
"]",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'authors'",
"]",
")",
",",
"values",
"[",
"'journal'",
"]",
",",
"values",
"[",
"'volume'",
"]",
",",
"values",
"[",
"'number'",
"]",
",",
"values",
"[",
"'pages'",
"]",
",",
"values",
"[",
"'year'",
"]",
",",
"values",
"[",
"'publisher'",
"]",
",",
"values",
"[",
"'doi'",
"]",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'tags'",
"]",
")",
")",
"q",
"=",
"self",
".",
"default",
"+",
"','",
"+",
"', '",
".",
"join",
"(",
"'?'",
"*",
"len",
"(",
"values",
")",
")",
"cur",
".",
"execute",
"(",
"'INSERT OR IGNORE INTO publication VALUES ({})'",
".",
"format",
"(",
"q",
")",
",",
"values",
")",
"pid",
"=",
"self",
".",
"get_last_id",
"(",
"cur",
",",
"table",
"=",
"'publication'",
")",
"if",
"self",
".",
"connection",
"is",
"None",
":",
"con",
".",
"commit",
"(",
")",
"con",
".",
"close",
"(",
")",
"return",
"pid"
] | Write publication info to db
Parameters
----------
values: dict with entries
{'pub_id': str (short name for publication),
'authors': list of str ()
'journal': str,
'volume': str,
'number': str,
'pages': 'str'
'year': int,
'publisher': str,
'doi': str,
'tags': list of str} | [
"Write",
"publication",
"info",
"to",
"db"
324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L157-L201 | train
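A `values` dict following the documented keys might look like the placeholder record below (the constructor call is commented out because its exact signature is not shown here):

```python
# Placeholder publication record; keys follow the write_publication() docstring.
publication = {
    'pub_id': 'DoeOxygen2019',
    'title': 'Oxygen adsorption on model surfaces',
    'authors': ['Doe, J.', 'Smith, A.'],
    'journal': 'J. Catal.',
    'volume': '123',
    'number': '4',
    'pages': '1-10',
    'year': 2019,
    'publisher': 'Elsevier',
    'doi': '10.0000/example',
    'tags': ['oxygen', 'adsorption'],
}

# db = CathubSQLite('cathub.db')             # assumed constructor, not shown above
# pub_id = db.write_publication(publication)
```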
SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite.write

```python
def write(self, values, data=None):
    """
    Write reaction info to db file

    Parameters
    ----------
    values: dict
        The values dict can include:
        {'chemical_composition': str (chemical composition on empty slab),
         'surface_composition': str (reduced chemical composition or
                                     shortname),
         'facet': str
         'sites': dict
             adsorption sites of species.
             f.ex: {'OH': 'ontop', 'O': 'hollow'}
         'coverages': dict
             coverage of adsorbates relative to the unit cell
             f.ex. {'OH': 0.25, 'O': 0.5})
         'reactants'/ 'products': dict
             keys with name of chemical species followed by phase (gas, *)
             values are the prefactor in the reaction.
             For reaction H2Ogas -> 2Hstar + O star you would write:
             'reactants': {OHstar: 1, Hstar: 2}
             'products': {OHstar: 1, Hstar: 2}
         'reaction_energy': float
         'activation_energy': float
         'dft_code': str
         'dft_functional': str
         'username': str
         'pub_id': str
             Should match the pub_id of the corresponding publications
         }
    """
    con = self.connection or self._connect()
    self._initialize(con)
    cur = con.cursor()

    pub_id = values['pub_id']
    ase_ids = values['ase_ids']
    energy_corrections = values['energy_corrections']

    if ase_ids is not None:
        check_ase_ids(values, ase_ids)
    else:
        ase_ids = {}

    values = (values['chemical_composition'],
              values['surface_composition'],
              values['facet'],
              json.dumps(values['sites']),
              json.dumps(values['coverages']),
              json.dumps(values['reactants']),
              json.dumps(values['products']),
              values['reaction_energy'],
              values['activation_energy'],
              values['dft_code'],
              values['dft_functional'],
              values['username'],
              values['pub_id']
              )

    """ Write to reaction table"""
    q = self.default + ',' + ', '.join('?' * len(values))
    cur.execute('INSERT INTO reaction VALUES ({})'.format(q),
                values)
    id = self.get_last_id(cur)

    reaction_structure_values = []

    """ Write to publication_system and reaction_system tables"""
    for name, ase_id in ase_ids.items():
        if name in energy_corrections:
            energy_correction = energy_corrections[name]
        else:
            energy_correction = 0
        reaction_structure_values.append([name, energy_correction,
                                          ase_id, id])
        insert_statement = """INSERT OR IGNORE INTO
        publication_system(ase_id, pub_id) VALUES (?, ?)"""
        cur.execute(insert_statement, [ase_id, pub_id])
    cur.executemany('INSERT INTO reaction_system VALUES (?, ?, ?, ?)',
                    reaction_structure_values)

    if self.connection is None:
        con.commit()
        con.close()

    return id
```
"""
Write reaction info to db file
Parameters
----------
values: dict
The values dict can include:
{'chemical_composition': str (chemical composition on empty slab) ,
'surface_composition': str (reduced chemical composition or
shortname),
'facet': str
'sites': dict
adsorption sites of species.
f.ex: {'OH': 'ontop', 'O': 'hollow'}
'coverages': dict
coverage of adsorbates relative to the unit cell
f.ex. {'OH': 0.25, 'O': 0.5})
'reactants'/ 'products': dict
keys with name of chemical species folloved by phase (gas, *)
values are the prefactor in the reaction.
For reaction H2Ogas -> 2Hstar + O star you would write:
'reactants': {OHstar: 1, Hstar: 2}
'products': {OHstar: 1, Hstar: 2}
'reaction_energy': float
'activation_energy': float
'dft_code': str
'dft_functional': str
'username': str
'pub_id': str
Should match the pub_id of the corresponding publications
}
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
pub_id = values['pub_id']
ase_ids = values['ase_ids']
energy_corrections = values['energy_corrections']
if ase_ids is not None:
check_ase_ids(values, ase_ids)
else:
ase_ids = {}
values = (values['chemical_composition'],
values['surface_composition'],
values['facet'],
json.dumps(values['sites']),
json.dumps(values['coverages']),
json.dumps(values['reactants']),
json.dumps(values['products']),
values['reaction_energy'],
values['activation_energy'],
values['dft_code'],
values['dft_functional'],
values['username'],
values['pub_id']
)
""" Write to reaction table"""
q = self.default + ',' + ', '.join('?' * len(values))
cur.execute('INSERT INTO reaction VALUES ({})'.format(q),
values)
id = self.get_last_id(cur)
reaction_structure_values = []
""" Write to publication_system and reaction_system tables"""
for name, ase_id in ase_ids.items():
if name in energy_corrections:
energy_correction = energy_corrections[name]
else:
energy_correction = 0
reaction_structure_values.append([name, energy_correction,
ase_id, id])
insert_statement = """INSERT OR IGNORE INTO
publication_system(ase_id, pub_id) VALUES (?, ?)"""
cur.execute(insert_statement, [ase_id, pub_id])
cur.executemany('INSERT INTO reaction_system VALUES (?, ?, ?, ?)',
reaction_structure_values)
if self.connection is None:
con.commit()
con.close()
return id | [
"def",
"write",
"(",
"self",
",",
"values",
",",
"data",
"=",
"None",
")",
":",
"con",
"=",
"self",
".",
"connection",
"or",
"self",
".",
"_connect",
"(",
")",
"self",
".",
"_initialize",
"(",
"con",
")",
"cur",
"=",
"con",
".",
"cursor",
"(",
")",
"pub_id",
"=",
"values",
"[",
"'pub_id'",
"]",
"ase_ids",
"=",
"values",
"[",
"'ase_ids'",
"]",
"energy_corrections",
"=",
"values",
"[",
"'energy_corrections'",
"]",
"if",
"ase_ids",
"is",
"not",
"None",
":",
"check_ase_ids",
"(",
"values",
",",
"ase_ids",
")",
"else",
":",
"ase_ids",
"=",
"{",
"}",
"values",
"=",
"(",
"values",
"[",
"'chemical_composition'",
"]",
",",
"values",
"[",
"'surface_composition'",
"]",
",",
"values",
"[",
"'facet'",
"]",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'sites'",
"]",
")",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'coverages'",
"]",
")",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'reactants'",
"]",
")",
",",
"json",
".",
"dumps",
"(",
"values",
"[",
"'products'",
"]",
")",
",",
"values",
"[",
"'reaction_energy'",
"]",
",",
"values",
"[",
"'activation_energy'",
"]",
",",
"values",
"[",
"'dft_code'",
"]",
",",
"values",
"[",
"'dft_functional'",
"]",
",",
"values",
"[",
"'username'",
"]",
",",
"values",
"[",
"'pub_id'",
"]",
")",
"\"\"\" Write to reaction table\"\"\"",
"q",
"=",
"self",
".",
"default",
"+",
"','",
"+",
"', '",
".",
"join",
"(",
"'?'",
"*",
"len",
"(",
"values",
")",
")",
"cur",
".",
"execute",
"(",
"'INSERT INTO reaction VALUES ({})'",
".",
"format",
"(",
"q",
")",
",",
"values",
")",
"id",
"=",
"self",
".",
"get_last_id",
"(",
"cur",
")",
"reaction_structure_values",
"=",
"[",
"]",
"\"\"\" Write to publication_system and reaction_system tables\"\"\"",
"for",
"name",
",",
"ase_id",
"in",
"ase_ids",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"energy_corrections",
":",
"energy_correction",
"=",
"energy_corrections",
"[",
"name",
"]",
"else",
":",
"energy_correction",
"=",
"0",
"reaction_structure_values",
".",
"append",
"(",
"[",
"name",
",",
"energy_correction",
",",
"ase_id",
",",
"id",
"]",
")",
"insert_statement",
"=",
"\"\"\"INSERT OR IGNORE INTO\n publication_system(ase_id, pub_id) VALUES (?, ?)\"\"\"",
"cur",
".",
"execute",
"(",
"insert_statement",
",",
"[",
"ase_id",
",",
"pub_id",
"]",
")",
"cur",
".",
"executemany",
"(",
"'INSERT INTO reaction_system VALUES (?, ?, ?, ?)'",
",",
"reaction_structure_values",
")",
"if",
"self",
".",
"connection",
"is",
"None",
":",
"con",
".",
"commit",
"(",
")",
"con",
".",
"close",
"(",
")",
"return",
"id"
] | Write reaction info to db file
Parameters
----------
values: dict
The values dict can include:
{'chemical_composition': str (chemical composition on empty slab) ,
'surface_composition': str (reduced chemical composition or
shortname),
'facet': str
'sites': dict
adsorption sites of species.
f.ex: {'OH': 'ontop', 'O': 'hollow'}
'coverages': dict
coverage of adsorbates relative to the unit cell
f.ex. {'OH': 0.25, 'O': 0.5})
'reactants'/ 'products': dict
keys with name of chemical species folloved by phase (gas, *)
values are the prefactor in the reaction.
For reaction H2Ogas -> 2Hstar + O star you would write:
'reactants': {OHstar: 1, Hstar: 2}
'products': {OHstar: 1, Hstar: 2}
'reaction_energy': float
'activation_energy': float
'dft_code': str
'dft_functional': str
'username': str
'pub_id': str
Should match the pub_id of the corresponding publications
} | [
"Write",
"reaction",
"info",
"to",
"db",
"file"
324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L203-L291 | train
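A matching reaction entry could be assembled as below; note that `write()` also reads `ase_ids` and `energy_corrections`, which the docstring does not list. All values are placeholders.

```python
# Placeholder reaction record; keys follow the write() docstring plus the two
# extra fields ('ase_ids', 'energy_corrections') that the method reads from values.
reaction = {
    'chemical_composition': 'Pt16',
    'surface_composition': 'Pt',
    'facet': '111',
    'sites': {'Ostar': 'fcc'},
    'coverages': {'Ostar': 0.25},
    'reactants': {'O2gas': 0.5, 'star': 1},
    'products': {'Ostar': 1},
    'reaction_energy': -1.23,
    'activation_energy': None,
    'dft_code': 'Quantum ESPRESSO',
    'dft_functional': 'BEEF-vdW',
    'username': 'someone@example.com',
    'pub_id': 'DoeOxygen2019',
    'ase_ids': None,                 # or a {species_name: ase_id} mapping
    'energy_corrections': {},
}

# row_id = db.write(reaction)        # assuming db is an open CathubSQLite instance
```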
SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite.update

```python
def update(self, id, values, key_names='all'):
    """
    Update reaction info for a selected row

    Parameters
    ----------
    id: int
        row integer
    values: dict
        See write() method for details
    key_names: list or 'all'
        list with name of columns to update. Should match the key-value
        pairs in values.
        default is 'all'
    """
    con = self.connection or self._connect()
    self._initialize(con)
    cur = con.cursor()

    pub_id = values['pub_id']
    ase_ids = values['ase_ids']
    energy_corrections = values['energy_corrections']

    if ase_ids is not None:
        check_ase_ids(values, ase_ids)
    else:
        ase_ids = {}

    key_list, value_list = get_key_value_list(key_names, values)
    N_keys = len(key_list)

    value_strlist = get_value_strlist(value_list)

    execute_str = ', '.join('{}={}'.format(key_list[i], value_strlist[i])
                            for i in range(N_keys))

    update_command = 'UPDATE reaction SET {} WHERE id = {};'\
        .format(execute_str, id)
    cur.execute(update_command)

    delete_command = 'DELETE from reaction_system WHERE id = {}'.format(id)
    cur.execute(delete_command)

    reaction_structure_values = []
    for name, ase_id in ase_ids.items():
        reaction_structure_values.append([name,
                                          energy_corrections.get(name),
                                          ase_id, id])
        insert_statement = """INSERT OR IGNORE INTO
        publication_system(ase_id, pub_id) VALUES (?, ?)"""
        cur.execute(insert_statement, [ase_id, pub_id])
    cur.executemany('INSERT INTO reaction_system VALUES (?, ?, ?, ?)',
                    reaction_structure_values)

    if self.connection is None:
        con.commit()
        con.close()

    return id
```
"""
Update reaction info for a selected row
Parameters
----------
id: int
row integer
values: dict
See write() method for details
key_names: list or 'all'
list with name of columns to update. Should match the keys-value
pairs in values.
default is 'all'
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
pub_id = values['pub_id']
ase_ids = values['ase_ids']
energy_corrections = values['energy_corrections']
if ase_ids is not None:
check_ase_ids(values, ase_ids)
else:
ase_ids = {}
key_list, value_list = get_key_value_list(key_names, values)
N_keys = len(key_list)
value_strlist = get_value_strlist(value_list)
execute_str = ', '.join('{}={}'.format(key_list[i], value_strlist[i])
for i in range(N_keys))
update_command = 'UPDATE reaction SET {} WHERE id = {};'\
.format(execute_str, id)
cur.execute(update_command)
delete_command = 'DELETE from reaction_system WHERE id = {}'.format(id)
cur.execute(delete_command)
reaction_structure_values = []
for name, ase_id in ase_ids.items():
reaction_structure_values.append([name,
energy_corrections.get(name),
ase_id, id])
insert_statement = """INSERT OR IGNORE INTO
publication_system(ase_id, pub_id) VALUES (?, ?)"""
cur.execute(insert_statement, [ase_id, pub_id])
cur.executemany('INSERT INTO reaction_system VALUES (?, ?, ?, ?)',
reaction_structure_values)
if self.connection is None:
con.commit()
con.close()
return id | [
"def",
"update",
"(",
"self",
",",
"id",
",",
"values",
",",
"key_names",
"=",
"'all'",
")",
":",
"con",
"=",
"self",
".",
"connection",
"or",
"self",
".",
"_connect",
"(",
")",
"self",
".",
"_initialize",
"(",
"con",
")",
"cur",
"=",
"con",
".",
"cursor",
"(",
")",
"pub_id",
"=",
"values",
"[",
"'pub_id'",
"]",
"ase_ids",
"=",
"values",
"[",
"'ase_ids'",
"]",
"energy_corrections",
"=",
"values",
"[",
"'energy_corrections'",
"]",
"if",
"ase_ids",
"is",
"not",
"None",
":",
"check_ase_ids",
"(",
"values",
",",
"ase_ids",
")",
"else",
":",
"ase_ids",
"=",
"{",
"}",
"key_list",
",",
"value_list",
"=",
"get_key_value_list",
"(",
"key_names",
",",
"values",
")",
"N_keys",
"=",
"len",
"(",
"key_list",
")",
"value_strlist",
"=",
"get_value_strlist",
"(",
"value_list",
")",
"execute_str",
"=",
"', '",
".",
"join",
"(",
"'{}={}'",
".",
"format",
"(",
"key_list",
"[",
"i",
"]",
",",
"value_strlist",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"N_keys",
")",
")",
"update_command",
"=",
"'UPDATE reaction SET {} WHERE id = {};'",
".",
"format",
"(",
"execute_str",
",",
"id",
")",
"cur",
".",
"execute",
"(",
"update_command",
")",
"delete_command",
"=",
"'DELETE from reaction_system WHERE id = {}'",
".",
"format",
"(",
"id",
")",
"cur",
".",
"execute",
"(",
"delete_command",
")",
"reaction_structure_values",
"=",
"[",
"]",
"for",
"name",
",",
"ase_id",
"in",
"ase_ids",
".",
"items",
"(",
")",
":",
"reaction_structure_values",
".",
"append",
"(",
"[",
"name",
",",
"energy_corrections",
".",
"get",
"(",
"name",
")",
",",
"ase_id",
",",
"id",
"]",
")",
"insert_statement",
"=",
"\"\"\"INSERT OR IGNORE INTO\n publication_system(ase_id, pub_id) VALUES (?, ?)\"\"\"",
"cur",
".",
"execute",
"(",
"insert_statement",
",",
"[",
"ase_id",
",",
"pub_id",
"]",
")",
"cur",
".",
"executemany",
"(",
"'INSERT INTO reaction_system VALUES (?, ?, ?, ?)'",
",",
"reaction_structure_values",
")",
"if",
"self",
".",
"connection",
"is",
"None",
":",
"con",
".",
"commit",
"(",
")",
"con",
".",
"close",
"(",
")",
"return",
"id"
] | Update reaction info for a selected row
Parameters
----------
id: int
row integer
values: dict
See write() method for details
key_names: list or 'all'
list with name of columns to update. Should match the keys-value
pairs in values.
default is 'all' | [
"Update",
"reaction",
"info",
"for",
"a",
"selected",
"row"
324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L293-L351 | train
SUNCAT-Center/CatHub | cathub/cathubsqlite.py | CathubSQLite.get_last_id

```python
def get_last_id(self, cur, table='reaction'):
    """
    Get the id of the last written row in table

    Parameters
    ----------
    cur: database connection().cursor() object
    table: str
        'reaction', 'publication', 'publication_system', 'reaction_system'

    Returns: id
    """
    cur.execute("SELECT seq FROM sqlite_sequence WHERE name='{0}'"
                .format(table))
    result = cur.fetchone()
    if result is not None:
        id = result[0]
    else:
        id = 0
    return id
```
"""
Get the id of the last written row in table
Parameters
----------
cur: database connection().cursor() object
table: str
'reaction', 'publication', 'publication_system', 'reaction_system'
Returns: id
"""
cur.execute("SELECT seq FROM sqlite_sequence WHERE name='{0}'"
.format(table))
result = cur.fetchone()
if result is not None:
id = result[0]
else:
id = 0
return id | [
"def",
"get_last_id",
"(",
"self",
",",
"cur",
",",
"table",
"=",
"'reaction'",
")",
":",
"cur",
".",
"execute",
"(",
"\"SELECT seq FROM sqlite_sequence WHERE name='{0}'\"",
".",
"format",
"(",
"table",
")",
")",
"result",
"=",
"cur",
".",
"fetchone",
"(",
")",
"if",
"result",
"is",
"not",
"None",
":",
"id",
"=",
"result",
"[",
"0",
"]",
"else",
":",
"id",
"=",
"0",
"return",
"id"
] | Get the id of the last written row in table
Parameters
----------
cur: database connection().cursor() object
table: str
'reaction', 'publication', 'publication_system', 'reaction_system'
Returns: id | [
"Get",
"the",
"id",
"of",
"the",
"last",
"written",
"row",
"in",
"table"
324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cathubsqlite.py#L353-L372 | train
guaix-ucm/numina | numina/array/wavecalib/__main__.py | read_wv_master_from_array

```python
def read_wv_master_from_array(master_table, lines='brightest', debugplot=0):
    """read arc line wavelengths from numpy array

    Parameters
    ----------
    master_table : Numpy array
        Numpy array containing the wavelength database.
    lines : string
        Indicates which lines to read. For files with a single column
        or two columns this parameter is irrelevant. For files with
        three columns, lines='brightest' indicates that only the
        brightest lines are read, whereas lines='all' means that all
        the lines are considered.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    wv_master : 1d numpy array
        Array with arc line wavelengths.
    """
    # protection
    if lines not in ['brightest', 'all']:
        raise ValueError('Unexpected lines=' + str(lines))

    # determine wavelengths according to the number of columns
    if master_table.ndim == 1:
        wv_master = master_table
    else:
        wv_master_all = master_table[:, 0]
        if master_table.shape[1] == 2:  # assume old format
            wv_master = np.copy(wv_master_all)
        elif master_table.shape[1] == 3:  # assume new format
            if lines == 'brightest':
                wv_flag = master_table[:, 1]
                wv_master = wv_master_all[np.where(wv_flag == 1)]
            else:
                wv_master = np.copy(wv_master_all)
        else:
            raise ValueError('Lines_catalog file does not have the '
                             'expected number of columns')

    if abs(debugplot) >= 10:
        print("Reading master table from numpy array")
        print("wv_master:\n", wv_master)

    return wv_master
```
"""read arc line wavelengths from numpy array
Parameters
----------
master_table : Numpy array
Numpy array containing the wavelength database.
lines : string
Indicates which lines to read. For files with a single column
or two columns this parameter is irrelevant. For files with
three columns, lines='brightest' indicates that only the
brightest lines are read, whereas lines='all' means that all
the lines are considered.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
wv_master : 1d numpy array
Array with arc line wavelengths.
"""
# protection
if lines not in ['brightest', 'all']:
raise ValueError('Unexpected lines=' + str(lines))
# determine wavelengths according to the number of columns
if master_table.ndim == 1:
wv_master = master_table
else:
wv_master_all = master_table[:, 0]
if master_table.shape[1] == 2: # assume old format
wv_master = np.copy(wv_master_all)
elif master_table.shape[1] == 3: # assume new format
if lines == 'brightest':
wv_flag = master_table[:, 1]
wv_master = wv_master_all[np.where(wv_flag == 1)]
else:
wv_master = np.copy(wv_master_all)
else:
raise ValueError('Lines_catalog file does not have the '
'expected number of columns')
if abs(debugplot) >= 10:
print("Reading master table from numpy array")
print("wv_master:\n", wv_master)
return wv_master | [
"def",
"read_wv_master_from_array",
"(",
"master_table",
",",
"lines",
"=",
"'brightest'",
",",
"debugplot",
"=",
"0",
")",
":",
"# protection",
"if",
"lines",
"not",
"in",
"[",
"'brightest'",
",",
"'all'",
"]",
":",
"raise",
"ValueError",
"(",
"'Unexpected lines='",
"+",
"str",
"(",
"lines",
")",
")",
"# determine wavelengths according to the number of columns",
"if",
"master_table",
".",
"ndim",
"==",
"1",
":",
"wv_master",
"=",
"master_table",
"else",
":",
"wv_master_all",
"=",
"master_table",
"[",
":",
",",
"0",
"]",
"if",
"master_table",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
":",
"# assume old format",
"wv_master",
"=",
"np",
".",
"copy",
"(",
"wv_master_all",
")",
"elif",
"master_table",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
":",
"# assume new format",
"if",
"lines",
"==",
"'brightest'",
":",
"wv_flag",
"=",
"master_table",
"[",
":",
",",
"1",
"]",
"wv_master",
"=",
"wv_master_all",
"[",
"np",
".",
"where",
"(",
"wv_flag",
"==",
"1",
")",
"]",
"else",
":",
"wv_master",
"=",
"np",
".",
"copy",
"(",
"wv_master_all",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Lines_catalog file does not have the '",
"'expected number of columns'",
")",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"\"Reading master table from numpy array\"",
")",
"print",
"(",
"\"wv_master:\\n\"",
",",
"wv_master",
")",
"return",
"wv_master"
] | read arc line wavelengths from numpy array
Parameters
----------
master_table : Numpy array
Numpy array containing the wavelength database.
lines : string
Indicates which lines to read. For files with a single column
or two columns this parameter is irrelevant. For files with
three columns, lines='brightest' indicates that only the
brightest lines are read, whereas lines='all' means that all
the lines are considered.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
wv_master : 1d numpy array
Array with arc line wavelengths. | [
"read",
"arc",
"line",
"wavelengths",
"from",
"numpy",
"array"
6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/__main__.py#L114-L164 | train
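A quick check of the three-column behaviour with a synthetic table, using the function defined above:

```python
# Synthetic wavelength table: column 0 = wavelength, column 1 = "brightest" flag,
# column 2 = ignored by this function.
import numpy as np

master_table = np.array([[5000.0, 1, 0],
                         [5100.0, 0, 0],
                         [5200.0, 1, 0]])

print(read_wv_master_from_array(master_table, lines='brightest'))  # [5000. 5200.]
print(read_wv_master_from_array(master_table, lines='all'))        # [5000. 5100. 5200.]
```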
guaix-ucm/numina | numina/array/wavecalib/__main__.py | read_wv_master_file

```python
def read_wv_master_file(wv_master_file, lines='brightest', debugplot=0):
    """read arc line wavelengths from external file.

    Parameters
    ----------
    wv_master_file : string
        File name of txt file containing the wavelength database.
    lines : string
        Indicates which lines to read. For files with a single column
        or two columns this parameter is irrelevant. For files with
        three columns, lines='brightest' indicates that only the
        brightest lines are read, whereas lines='all' means that all
        the lines are considered.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    wv_master : 1d numpy array
        Array with arc line wavelengths.
    """
    # protection
    if lines not in ['brightest', 'all']:
        raise ValueError('Unexpected lines=' + str(lines))

    # read table from txt file
    master_table = np.genfromtxt(wv_master_file)
    wv_master = read_wv_master_from_array(master_table, lines)

    if abs(debugplot) >= 10:
        print("Reading master table: " + wv_master_file)
        print("wv_master:\n", wv_master)

    return wv_master
```
"""read arc line wavelengths from external file.
Parameters
----------
wv_master_file : string
File name of txt file containing the wavelength database.
lines : string
Indicates which lines to read. For files with a single column
or two columns this parameter is irrelevant. For files with
three columns, lines='brightest' indicates that only the
brightest lines are read, whereas lines='all' means that all
the lines are considered.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
wv_master : 1d numpy array
Array with arc line wavelengths.
"""
# protection
if lines not in ['brightest', 'all']:
raise ValueError('Unexpected lines=' + str(lines))
# read table from txt file
master_table = np.genfromtxt(wv_master_file)
wv_master = read_wv_master_from_array(master_table, lines)
if abs(debugplot) >= 10:
print("Reading master table: " + wv_master_file)
print("wv_master:\n", wv_master)
return wv_master | [
"def",
"read_wv_master_file",
"(",
"wv_master_file",
",",
"lines",
"=",
"'brightest'",
",",
"debugplot",
"=",
"0",
")",
":",
"# protection",
"if",
"lines",
"not",
"in",
"[",
"'brightest'",
",",
"'all'",
"]",
":",
"raise",
"ValueError",
"(",
"'Unexpected lines='",
"+",
"str",
"(",
"lines",
")",
")",
"# read table from txt file",
"master_table",
"=",
"np",
".",
"genfromtxt",
"(",
"wv_master_file",
")",
"wv_master",
"=",
"read_wv_master_from_array",
"(",
"master_table",
",",
"lines",
")",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"\"Reading master table: \"",
"+",
"wv_master_file",
")",
"print",
"(",
"\"wv_master:\\n\"",
",",
"wv_master",
")",
"return",
"wv_master"
] | read arc line wavelengths from external file.
Parameters
----------
wv_master_file : string
File name of txt file containing the wavelength database.
lines : string
Indicates which lines to read. For files with a single column
or two columns this parameter is irrelevant. For files with
three columns, lines='brightest' indicates that only the
brightest lines are read, whereas lines='all' means that all
the lines are considered.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
wv_master : 1d numpy array
Array with arc line wavelengths. | [
"read",
"arc",
"line",
"wavelengths",
"from",
"external",
"file",
"."
6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/__main__.py#L167-L205 | train
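For the three-column case the text file read by `np.genfromtxt` holds one arc line per row: wavelength, a 0/1 flag marking the brightest lines, and a third numeric column (its meaning is not shown here, so the zeros below are placeholders):

```python
# Illustrative only: write a small 3-column line catalogue and read it back.
with open('wv_master_example.txt', 'w') as f:
    f.write('5000.0  1  0\n'
            '5100.0  0  0\n'
            '5200.0  1  0\n')

wv_master = read_wv_master_file('wv_master_example.txt', lines='brightest')
print(wv_master)   # only the flagged lines: [5000. 5200.]
```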
guaix-ucm/numina | numina/array/wavecalib/__main__.py | wvcal_spectrum

```python
def wvcal_spectrum(sp, fxpeaks, poly_degree_wfit, wv_master,
                   wv_ini_search=None, wv_end_search=None,
                   wvmin_useful=None, wvmax_useful=None,
                   geometry=None, debugplot=0):
    """Execute wavelength calibration of a spectrum using fixed line peaks.

    Parameters
    ----------
    sp : 1d numpy array
        Spectrum to be wavelength calibrated.
    fxpeaks : 1d numpy array
        Refined location of peaks in array index scale, i.e, from 0
        to naxis1 - 1. The wavelength calibration is performed using
        these line locations.
    poly_degree_wfit : int
        Degree for wavelength calibration polynomial.
    wv_master : 1d numpy array
        Array with arc line wavelengths.
    wv_ini_search : float or None
        Minimum expected wavelength in spectrum.
    wv_end_search : float or None
        Maximum expected wavelength in spectrum.
    wvmin_useful : float or None
        If not None, this value is used to clip detected lines below it.
    wvmax_useful : float or None
        If not None, this value is used to clip detected lines above it.
    geometry : tuple (4 integers) or None
        x, y, dx, dy values employed to set the Qt backend geometry.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    solution_wv : instance of SolutionArcCalibration
        Wavelength calibration solution.
    """
    # check there are enough lines for fit
    if len(fxpeaks) <= poly_degree_wfit:
        print(">>> Warning: not enough lines to fit spectrum")
        return None

    # spectrum dimension
    naxis1 = sp.shape[0]

    wv_master_range = wv_master[-1] - wv_master[0]
    delta_wv_master_range = 0.20 * wv_master_range
    if wv_ini_search is None:
        wv_ini_search = wv_master[0] - delta_wv_master_range
    if wv_end_search is None:
        wv_end_search = wv_master[-1] + delta_wv_master_range

    # use channels (pixels from 1 to naxis1)
    xchannel = fxpeaks + 1.0

    # wavelength calibration
    list_of_wvfeatures = arccalibration(
        wv_master=wv_master,
        xpos_arc=xchannel,
        naxis1_arc=naxis1,
        crpix1=1.0,
        wv_ini_search=wv_ini_search,
        wv_end_search=wv_end_search,
        wvmin_useful=wvmin_useful,
        wvmax_useful=wvmax_useful,
        error_xpos_arc=3,
        times_sigma_r=3.0,
        frac_triplets_for_sum=0.50,
        times_sigma_theil_sen=10.0,
        poly_degree_wfit=poly_degree_wfit,
        times_sigma_polfilt=10.0,
        times_sigma_cook=10.0,
        times_sigma_inclusion=10.0,
        geometry=geometry,
        debugplot=debugplot
    )

    title = "Wavelength calibration"
    solution_wv = fit_list_of_wvfeatures(
        list_of_wvfeatures=list_of_wvfeatures,
        naxis1_arc=naxis1,
        crpix1=1.0,
        poly_degree_wfit=poly_degree_wfit,
        weighted=False,
        plot_title=title,
        geometry=geometry,
        debugplot=debugplot
    )

    if abs(debugplot) % 10 != 0:
        # final plot with identified lines
        xplot = np.arange(1, naxis1 + 1, dtype=float)
        ax = ximplotxy(xplot, sp, title=title, show=False,
                       xlabel='pixel (from 1 to NAXIS1)',
                       ylabel='number of counts',
                       geometry=geometry)
        ymin = sp.min()
        ymax = sp.max()
        dy = ymax - ymin
        ymin -= dy / 20.
        ymax += dy / 20.
        ax.set_ylim([ymin, ymax])
        # plot wavelength of each identified line
        for feature in solution_wv.features:
            xpos = feature.xpos
            reference = feature.reference
            ax.text(xpos, sp[int(xpos + 0.5) - 1],
                    str(reference), fontsize=8,
                    horizontalalignment='center')
        # show plot
        print('Plot with identified lines')
        pause_debugplot(12, pltshow=True)

    # return the wavelength calibration solution
    return solution_wv
```
6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/__main__.py#L382-L499 | train
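A hedged end-to-end sketch: the arc spectrum, line positions, and master wavelengths below are synthetic and consistent with a linear dispersion, and `scipy.signal.find_peaks` stands in for the pipeline's own peak finder; with real data the line identification can still fail if the catalogue does not match the spectrum.

```python
# Illustrative pipeline: detect arc-line peaks, then wavelength-calibrate the spectrum.
import numpy as np
from scipy.signal import find_peaks          # stand-in peak finder for this sketch

rng = np.random.default_rng(0)
sp = rng.normal(10.0, 1.0, 2048)             # fake arc spectrum (flat background)
line_pixels = np.array([100, 350, 600, 850, 1100, 1350, 1600, 1850])
sp[line_pixels] += 500.0                     # eight fake arc lines

peaks, _ = find_peaks(sp, height=100.0)
fxpeaks = peaks.astype(float)                # 0-based peak positions, as expected above

# Master wavelengths consistent with wavelength = 4000 + 0.8 * (pixel + 1)
wv_master = 4000.0 + 0.8 * (line_pixels + 1.0)

solution_wv = wvcal_spectrum(sp, fxpeaks, poly_degree_wfit=2,
                             wv_master=wv_master, debugplot=0)
if solution_wv is not None:
    print(solution_wv)
```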
chhantyal/sorl-thumbnail-async | thumbnail/__init__.py | get_thumbnail | def get_thumbnail(file_, name):
"""
get_thumbnail version that uses aliases defined in THUMBNAIL_OPTIONS_DICT
"""
options = settings.OPTIONS_DICT[name]
opt = copy(options)
geometry = opt.pop('geometry')
return original_get_thumbnail(file_, geometry, **opt) | python | def get_thumbnail(file_, name):
"""
get_thumbnail version that uses aliases defined in THUMBNAIL_OPTIONS_DICT
"""
options = settings.OPTIONS_DICT[name]
opt = copy(options)
geometry = opt.pop('geometry')
return original_get_thumbnail(file_, geometry, **opt) | [
"def",
"get_thumbnail",
"(",
"file_",
",",
"name",
")",
":",
"options",
"=",
"settings",
".",
"OPTIONS_DICT",
"[",
"name",
"]",
"opt",
"=",
"copy",
"(",
"options",
")",
"geometry",
"=",
"opt",
".",
"pop",
"(",
"'geometry'",
")",
"return",
"original_get_thumbnail",
"(",
"file_",
",",
"geometry",
",",
"*",
"*",
"opt",
")"
] | get_thumbnail version that uses aliases defined in THUMBNAIL_OPTIONS_DICT | [
"get_thumbnail",
"version",
"that",
"uses",
"aliasses",
"defined",
"in",
"THUMBNAIL_OPTIONS_DICT"
] | 023d20aac79090a691d563dc26f558bb87239811 | https://github.com/chhantyal/sorl-thumbnail-async/blob/023d20aac79090a691d563dc26f558bb87239811/thumbnail/__init__.py#L9-L17 | train |
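A minimal usage sketch for the alias-based get_thumbnail above. The 'small' alias, its options and the profile.photo field are illustrative assumptions; each alias dict must at least provide a 'geometry' entry, and the remaining keys are passed straight through to sorl-thumbnail.

# settings.py (assumed alias definition)
THUMBNAIL_OPTIONS_DICT = {
    'small': {'geometry': '100x100', 'crop': 'center'},
}

# in a view or template helper
from thumbnail import get_thumbnail
thumb = get_thumbnail(profile.photo, 'small')  # resolves the 'small' alias
url = thumb.url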
xflr6/bitsets | bitsets/__init__.py | bitset | def bitset(name, members, base=bases.BitSet, list=False, tuple=False):
"""Return a new bitset class with given name and members.
Args:
name: Name of the class to be created.
members: Hashable sequence of allowed bitset members.
base: Base class to derive the returned class from.
list (bool): Include a custom class for bitset lists.
tuple (bool): Include a custom class for bitset tuples.
Example:
>>> Letters = bitset('Letters', 'abcdef', list=True, tuple=True)
>>> Letters # doctest: +ELLIPSIS
<class bitsets.meta.bitset('Letters', 'abcdef', 0x..., BitSet, List, Tuple)>
>>> Letters('deadbeef')
Letters(['a', 'b', 'd', 'e', 'f'])
"""
if not name:
raise ValueError('empty bitset name: %r' % name)
if not hasattr(members, '__getitem__') or not hasattr(members, '__len__'):
raise ValueError('non-sequence bitset members: %r' % members)
if not len(members):
raise ValueError('less than one bitset member: %r' % (members,))
if len(set(members)) != len(members):
raise ValueError('bitset members contains duplicates: %r' % (members,))
if not issubclass(base.__class__, meta.MemberBitsMeta):
raise ValueError('base does not subclass bitset.bases: %r' % base)
list = {False: None, True: series.List}.get(list, list)
tuple = {False: None, True: series.Tuple}.get(tuple, tuple)
return base._make_subclass(name, members, listcls=list, tuplecls=tuple) | python | def bitset(name, members, base=bases.BitSet, list=False, tuple=False):
"""Return a new bitset class with given name and members.
Args:
name: Name of the class to be created.
members: Hashable sequence of allowed bitset members.
base: Base class to derive the returned class from.
list (bool): Include a custom class for bitset lists.
tuple (bool): Include a custom class for bitset tuples.
Example:
>>> Letters = bitset('Letters', 'abcdef', list=True, tuple=True)
>>> Letters # doctest: +ELLIPSIS
<class bitsets.meta.bitset('Letters', 'abcdef', 0x..., BitSet, List, Tuple)>
>>> Letters('deadbeef')
Letters(['a', 'b', 'd', 'e', 'f'])
"""
if not name:
raise ValueError('empty bitset name: %r' % name)
if not hasattr(members, '__getitem__') or not hasattr(members, '__len__'):
raise ValueError('non-sequence bitset members: %r' % members)
if not len(members):
raise ValueError('less than one bitset member: %r' % (members,))
if len(set(members)) != len(members):
raise ValueError('bitset members contains duplicates: %r' % (members,))
if not issubclass(base.__class__, meta.MemberBitsMeta):
raise ValueError('base does not subclass bitset.bases: %r' % base)
list = {False: None, True: series.List}.get(list, list)
tuple = {False: None, True: series.Tuple}.get(tuple, tuple)
return base._make_subclass(name, members, listcls=list, tuplecls=tuple) | [
"def",
"bitset",
"(",
"name",
",",
"members",
",",
"base",
"=",
"bases",
".",
"BitSet",
",",
"list",
"=",
"False",
",",
"tuple",
"=",
"False",
")",
":",
"if",
"not",
"name",
":",
"raise",
"ValueError",
"(",
"'empty bitset name: %r'",
"%",
"name",
")",
"if",
"not",
"hasattr",
"(",
"members",
",",
"'__getitem__'",
")",
"or",
"not",
"hasattr",
"(",
"members",
",",
"'__len__'",
")",
":",
"raise",
"ValueError",
"(",
"'non-sequence bitset members: %r'",
"%",
"members",
")",
"if",
"not",
"len",
"(",
"members",
")",
":",
"raise",
"ValueError",
"(",
"'less than one bitset member: %r'",
"%",
"(",
"members",
",",
")",
")",
"if",
"len",
"(",
"set",
"(",
"members",
")",
")",
"!=",
"len",
"(",
"members",
")",
":",
"raise",
"ValueError",
"(",
"'bitset members contains duplicates: %r'",
"%",
"(",
"members",
",",
")",
")",
"if",
"not",
"issubclass",
"(",
"base",
".",
"__class__",
",",
"meta",
".",
"MemberBitsMeta",
")",
":",
"raise",
"ValueError",
"(",
"'base does not subclass bitset.bases: %r'",
"%",
"base",
")",
"list",
"=",
"{",
"False",
":",
"None",
",",
"True",
":",
"series",
".",
"List",
"}",
".",
"get",
"(",
"list",
",",
"list",
")",
"tuple",
"=",
"{",
"False",
":",
"None",
",",
"True",
":",
"series",
".",
"Tuple",
"}",
".",
"get",
"(",
"tuple",
",",
"tuple",
")",
"return",
"base",
".",
"_make_subclass",
"(",
"name",
",",
"members",
",",
"listcls",
"=",
"list",
",",
"tuplecls",
"=",
"tuple",
")"
] | Return a new bitset class with given name and members.
Args:
name: Name of the class to be created.
members: Hashable sequence of allowed bitset members.
base: Base class to derive the returned class from.
list (bool): Include a custom class for bitset lists.
tuple (bool): Include a custom class for bitset tuples.
Example:
>>> Letters = bitset('Letters', 'abcdef', list=True, tuple=True)
>>> Letters # doctest: +ELLIPSIS
<class bitsets.meta.bitset('Letters', 'abcdef', 0x..., BitSet, List, Tuple)>
>>> Letters('deadbeef')
Letters(['a', 'b', 'd', 'e', 'f']) | [
"Return",
"a",
"new",
"bitset",
"class",
"with",
"given",
"name",
"and",
"members",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/__init__.py#L16-L51 | train |
guaix-ucm/numina | numina/array/interpolation.py | SteffenInterpolator._extrapolation | def _extrapolation(self, extrapolate):
"""Check permited values of extrapolation."""
modes = ['extrapolate',
'raise',
'const',
'border']
if extrapolate not in modes:
msg = 'invalid extrapolation mode {}'.format(extrapolate)
raise ValueError(msg)
if extrapolate == 'raise':
self.bounds_error = True
self.extrapolate = False
else:
self.extrapolate = True
self.bounds_error = False
self.extrapolate_mode = extrapolate | python | def _extrapolation(self, extrapolate):
"""Check permited values of extrapolation."""
modes = ['extrapolate',
'raise',
'const',
'border']
if extrapolate not in modes:
msg = 'invalid extrapolation mode {}'.format(extrapolate)
raise ValueError(msg)
if extrapolate == 'raise':
self.bounds_error = True
self.extrapolate = False
else:
self.extrapolate = True
self.bounds_error = False
self.extrapolate_mode = extrapolate | [
"def",
"_extrapolation",
"(",
"self",
",",
"extrapolate",
")",
":",
"modes",
"=",
"[",
"'extrapolate'",
",",
"'raise'",
",",
"'const'",
",",
"'border'",
"]",
"if",
"extrapolate",
"not",
"in",
"modes",
":",
"msg",
"=",
"'invalid extrapolation mode {}'",
".",
"format",
"(",
"extrapolate",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"extrapolate",
"==",
"'raise'",
":",
"self",
".",
"bounds_error",
"=",
"True",
"self",
".",
"extrapolate",
"=",
"False",
"else",
":",
"self",
".",
"extrapolate",
"=",
"True",
"self",
".",
"bounds_error",
"=",
"False",
"self",
".",
"extrapolate_mode",
"=",
"extrapolate"
] | Check permitted values of extrapolation. | [
"Check",
"permited",
"values",
"of",
"extrapolation",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L104-L120 | train |
guaix-ucm/numina | numina/array/interpolation.py | SteffenInterpolator._create_h | def _create_h(x):
"""increase between samples"""
h = np.zeros_like(x)
h[:-1] = x[1:] - x[:-1]
# border
h[-1] = h[-2]
return h | python | def _create_h(x):
"""increase between samples"""
h = np.zeros_like(x)
h[:-1] = x[1:] - x[:-1]
# border
h[-1] = h[-2]
return h | [
"def",
"_create_h",
"(",
"x",
")",
":",
"h",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"h",
"[",
":",
"-",
"1",
"]",
"=",
"x",
"[",
"1",
":",
"]",
"-",
"x",
"[",
":",
"-",
"1",
"]",
"# border",
"h",
"[",
"-",
"1",
"]",
"=",
"h",
"[",
"-",
"2",
"]",
"return",
"h"
] | increase between samples | [
"increase",
"between",
"samples"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L131-L137 | train |
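A small worked example of the interval helper above; the last entry is copied from the previous one so the output keeps the input's length.

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
h = np.zeros_like(x)
h[:-1] = x[1:] - x[:-1]  # consecutive intervals: [1.0, 2.0, 3.0]
h[-1] = h[-2]            # border: repeat the last real interval
# h is now [1.0, 2.0, 3.0, 3.0]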
guaix-ucm/numina | numina/array/interpolation.py | SteffenInterpolator._eval | def _eval(self, v, in_bounds, der):
"""Eval polynomial inside bounds."""
result = np.zeros_like(v, dtype='float')
x_indices = np.searchsorted(self._x, v, side='right')
ids = x_indices[in_bounds] - 1
u = v[in_bounds] - self._x[ids]
result[in_bounds] = self._poly_eval(u, ids, der)
return result | python | def _eval(self, v, in_bounds, der):
"""Eval polynomial inside bounds."""
result = np.zeros_like(v, dtype='float')
x_indices = np.searchsorted(self._x, v, side='right')
ids = x_indices[in_bounds] - 1
u = v[in_bounds] - self._x[ids]
result[in_bounds] = self._poly_eval(u, ids, der)
return result | [
"def",
"_eval",
"(",
"self",
",",
"v",
",",
"in_bounds",
",",
"der",
")",
":",
"result",
"=",
"np",
".",
"zeros_like",
"(",
"v",
",",
"dtype",
"=",
"'float'",
")",
"x_indices",
"=",
"np",
".",
"searchsorted",
"(",
"self",
".",
"_x",
",",
"v",
",",
"side",
"=",
"'rigth'",
")",
"ids",
"=",
"x_indices",
"[",
"in_bounds",
"]",
"-",
"1",
"u",
"=",
"v",
"[",
"in_bounds",
"]",
"-",
"self",
".",
"_x",
"[",
"ids",
"]",
"result",
"[",
"in_bounds",
"]",
"=",
"self",
".",
"_poly_eval",
"(",
"u",
",",
"ids",
",",
"der",
")",
"return",
"result"
] | Eval polynomial inside bounds. | [
"Eval",
"polynomial",
"inside",
"bounds",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L146-L153 | train |
guaix-ucm/numina | numina/array/interpolation.py | SteffenInterpolator._extrapolate | def _extrapolate(self, result, v, below_bounds, above_bounds, der):
"""Extrapolate result based on extrapolation mode."""
if self.extrapolate_mode == 'const':
fill_b = fill_a = self.fill_value
elif self.extrapolate_mode == 'border':
fill_b = self._poly_eval(0, 0, der)
fill_a = self._poly_eval(0, -1, der)
elif self.extrapolate_mode == 'extrapolate':
u = v[above_bounds] - self._x[-2]
fill_a = self._poly_eval(u, -2, der)
u = v[below_bounds] - self._x[0]
fill_b = self._poly_eval(u, 0, der)
else:
raise ValueError("extrapolation method doesn't exist")
result[below_bounds] = fill_b
result[above_bounds] = fill_a | python | def _extrapolate(self, result, v, below_bounds, above_bounds, der):
"""Extrapolate result based on extrapolation mode."""
if self.extrapolate_mode == 'const':
fill_b = fill_a = self.fill_value
elif self.extrapolate_mode == 'border':
fill_b = self._poly_eval(0, 0, der)
fill_a = self._poly_eval(0, -1, der)
elif self.extrapolate_mode == 'extrapolate':
u = v[above_bounds] - self._x[-2]
fill_a = self._poly_eval(u, -2, der)
u = v[below_bounds] - self._x[0]
fill_b = self._poly_eval(u, 0, der)
else:
raise ValueError("extrapolation method doesn't exist")
result[below_bounds] = fill_b
result[above_bounds] = fill_a | [
"def",
"_extrapolate",
"(",
"self",
",",
"result",
",",
"v",
",",
"below_bounds",
",",
"above_bounds",
",",
"der",
")",
":",
"if",
"self",
".",
"extrapolate_mode",
"==",
"'const'",
":",
"fill_b",
"=",
"fill_a",
"=",
"self",
".",
"fill_value",
"elif",
"self",
".",
"extrapolate_mode",
"==",
"'border'",
":",
"fill_b",
"=",
"self",
".",
"_poly_eval",
"(",
"0",
",",
"0",
",",
"der",
")",
"fill_a",
"=",
"self",
".",
"_poly_eval",
"(",
"0",
",",
"-",
"1",
",",
"der",
")",
"elif",
"self",
".",
"extrapolate_mode",
"==",
"'extrapolate'",
":",
"u",
"=",
"v",
"[",
"above_bounds",
"]",
"-",
"self",
".",
"_x",
"[",
"-",
"2",
"]",
"fill_a",
"=",
"self",
".",
"_poly_eval",
"(",
"u",
",",
"-",
"2",
",",
"der",
")",
"u",
"=",
"v",
"[",
"below_bounds",
"]",
"-",
"self",
".",
"_x",
"[",
"0",
"]",
"fill_b",
"=",
"self",
".",
"_poly_eval",
"(",
"u",
",",
"0",
",",
"der",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"extrapolation method doesn't exist\"",
")",
"result",
"[",
"below_bounds",
"]",
"=",
"fill_b",
"result",
"[",
"above_bounds",
"]",
"=",
"fill_a"
] | Extrapolate result based on extrapolation mode. | [
"Extrapolate",
"result",
"based",
"on",
"extrapolation",
"mode",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L155-L171 | train |
guaix-ucm/numina | numina/array/interpolation.py | SteffenInterpolator._check_bounds | def _check_bounds(self, v):
"""Check which values are out of bounds.
Raises
------
ValueError:
"""
below_bounds = v < self._x[0]
above_bounds = v > self._x[-1]
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
return below_bounds, above_bounds | python | def _check_bounds(self, v):
"""Check which values are out of bounds.
Raises
------
ValueError:
"""
below_bounds = v < self._x[0]
above_bounds = v > self._x[-1]
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
return below_bounds, above_bounds | [
"def",
"_check_bounds",
"(",
"self",
",",
"v",
")",
":",
"below_bounds",
"=",
"v",
"<",
"self",
".",
"_x",
"[",
"0",
"]",
"above_bounds",
"=",
"v",
">",
"self",
".",
"_x",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"bounds_error",
"and",
"below_bounds",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"A value in x_new is below the interpolation \"",
"\"range.\"",
")",
"if",
"self",
".",
"bounds_error",
"and",
"above_bounds",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"A value in x_new is above the interpolation \"",
"\"range.\"",
")",
"return",
"below_bounds",
",",
"above_bounds"
] | Check which values are out of bounds.
Raises
------
ValueError: | [
"Check",
"which",
"values",
"are",
"out",
"of",
"bounds",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L238-L256 | train |
guaix-ucm/numina | numina/array/wavecalib/crosscorrelation.py | filtmask | def filtmask(sp, fmin=0.02, fmax=0.15, debugplot=0):
"""Filter spectrum in Fourier space and apply cosine bell.
Parameters
----------
sp : numpy array
Spectrum to be filtered and masked.
fmin : float
Minimum frequency to be employed.
fmax : float
Maximum frequency to be employed.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_filtmask : numpy array
Filtered and masked spectrum
"""
# Fourier filtering
xf = np.fft.fftfreq(sp.size)
yf = np.fft.fft(sp)
if abs(debugplot) in (21, 22):
iok = np.where(xf > 0)
ximplotxy(xf[iok], yf[iok].real,
plottype='loglog',
xlabel='frequency', ylabel='power',
title='before masking', debugplot=debugplot)
cut = (np.abs(xf) > fmax)
yf[cut] = 0.0
cut = (np.abs(xf) < fmin)
yf[cut] = 0.0
if abs(debugplot) in (21, 22):
iok = np.where(xf > 0)
ximplotxy(xf[iok], yf[iok].real,
plottype='loglog',
xlabel='frequency', ylabel='power',
title='after masking', debugplot=debugplot)
sp_filt = np.fft.ifft(yf).real
if abs(debugplot) in (21, 22):
xdum = np.arange(1, sp_filt.size + 1)
ximplotxy(xdum, sp_filt, title="filtered median spectrum",
debugplot=debugplot)
sp_filtmask = sp_filt * cosinebell(sp_filt.size, 0.1)
if abs(debugplot) in (21, 22):
xdum = np.arange(1, sp_filt.size + 1)
ximplotxy(xdum, sp_filtmask,
title="filtered and masked median spectrum",
debugplot=debugplot)
return sp_filtmask | python | def filtmask(sp, fmin=0.02, fmax=0.15, debugplot=0):
"""Filter spectrum in Fourier space and apply cosine bell.
Parameters
----------
sp : numpy array
Spectrum to be filtered and masked.
fmin : float
Minimum frequency to be employed.
fmax : float
Maximum frequency to be employed.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_filtmask : numpy array
Filtered and masked spectrum
"""
# Fourier filtering
xf = np.fft.fftfreq(sp.size)
yf = np.fft.fft(sp)
if abs(debugplot) in (21, 22):
iok = np.where(xf > 0)
ximplotxy(xf[iok], yf[iok].real,
plottype='loglog',
xlabel='frequency', ylabel='power',
title='before masking', debugplot=debugplot)
cut = (np.abs(xf) > fmax)
yf[cut] = 0.0
cut = (np.abs(xf) < fmin)
yf[cut] = 0.0
if abs(debugplot) in (21, 22):
iok = np.where(xf > 0)
ximplotxy(xf[iok], yf[iok].real,
plottype='loglog',
xlabel='frequency', ylabel='power',
title='after masking', debugplot=debugplot)
sp_filt = np.fft.ifft(yf).real
if abs(debugplot) in (21, 22):
xdum = np.arange(1, sp_filt.size + 1)
ximplotxy(xdum, sp_filt, title="filtered median spectrum",
debugplot=debugplot)
sp_filtmask = sp_filt * cosinebell(sp_filt.size, 0.1)
if abs(debugplot) in (21, 22):
xdum = np.arange(1, sp_filt.size + 1)
ximplotxy(xdum, sp_filtmask,
title="filtered and masked median spectrum",
debugplot=debugplot)
return sp_filtmask | [
"def",
"filtmask",
"(",
"sp",
",",
"fmin",
"=",
"0.02",
",",
"fmax",
"=",
"0.15",
",",
"debugplot",
"=",
"0",
")",
":",
"# Fourier filtering",
"xf",
"=",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"sp",
".",
"size",
")",
"yf",
"=",
"np",
".",
"fft",
".",
"fft",
"(",
"sp",
")",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"(",
"21",
",",
"22",
")",
":",
"iok",
"=",
"np",
".",
"where",
"(",
"xf",
">",
"0",
")",
"ximplotxy",
"(",
"xf",
"[",
"iok",
"]",
",",
"yf",
"[",
"iok",
"]",
".",
"real",
",",
"plottype",
"=",
"'loglog'",
",",
"xlabel",
"=",
"'frequency'",
",",
"ylabel",
"=",
"'power'",
",",
"title",
"=",
"'before masking'",
",",
"debugplot",
"=",
"debugplot",
")",
"cut",
"=",
"(",
"np",
".",
"abs",
"(",
"xf",
")",
">",
"fmax",
")",
"yf",
"[",
"cut",
"]",
"=",
"0.0",
"cut",
"=",
"(",
"np",
".",
"abs",
"(",
"xf",
")",
"<",
"fmin",
")",
"yf",
"[",
"cut",
"]",
"=",
"0.0",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"(",
"21",
",",
"22",
")",
":",
"iok",
"=",
"np",
".",
"where",
"(",
"xf",
">",
"0",
")",
"ximplotxy",
"(",
"xf",
"[",
"iok",
"]",
",",
"yf",
"[",
"iok",
"]",
".",
"real",
",",
"plottype",
"=",
"'loglog'",
",",
"xlabel",
"=",
"'frequency'",
",",
"ylabel",
"=",
"'power'",
",",
"title",
"=",
"'after masking'",
",",
"debugplot",
"=",
"debugplot",
")",
"sp_filt",
"=",
"np",
".",
"fft",
".",
"ifft",
"(",
"yf",
")",
".",
"real",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"(",
"21",
",",
"22",
")",
":",
"xdum",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"sp_filt",
".",
"size",
"+",
"1",
")",
"ximplotxy",
"(",
"xdum",
",",
"sp_filt",
",",
"title",
"=",
"\"filtered median spectrum\"",
",",
"debugplot",
"=",
"debugplot",
")",
"sp_filtmask",
"=",
"sp_filt",
"*",
"cosinebell",
"(",
"sp_filt",
".",
"size",
",",
"0.1",
")",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"(",
"21",
",",
"22",
")",
":",
"xdum",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"sp_filt",
".",
"size",
"+",
"1",
")",
"ximplotxy",
"(",
"xdum",
",",
"sp_filtmask",
",",
"title",
"=",
"\"filtered and masked median spectrum\"",
",",
"debugplot",
"=",
"debugplot",
")",
"return",
"sp_filtmask"
] | Filter spectrum in Fourier space and apply cosine bell.
Parameters
----------
sp : numpy array
Spectrum to be filtered and masked.
fmin : float
Minimum frequency to be employed.
fmax : float
Maximum frequency to be employed.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_filtmask : numpy array
Filtered and masked spectrum | [
"Filter",
"spectrum",
"in",
"Fourier",
"space",
"and",
"apply",
"cosine",
"bell",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/crosscorrelation.py#L24-L80 | train |
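A self-contained sketch of the same band-pass idea used by filtmask, written with plain NumPy only (no numina plotting helpers); the cut-off frequencies shown are the function's defaults.

import numpy as np

def bandpass(sp, fmin=0.02, fmax=0.15):
    # keep only Fourier components with fmin < |frequency| < fmax
    xf = np.fft.fftfreq(sp.size)
    yf = np.fft.fft(sp)
    yf[np.abs(xf) > fmax] = 0.0  # drop high-frequency noise
    yf[np.abs(xf) < fmin] = 0.0  # drop the slowly varying continuum
    return np.fft.ifft(yf).real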
guaix-ucm/numina | numina/array/wavecalib/crosscorrelation.py | cosinebell | def cosinebell(n, fraction):
"""Return a cosine bell spanning n pixels, masking a fraction of pixels
Parameters
----------
n : int
Number of pixels.
fraction : float
Length fraction over which the data will be masked.
"""
mask = np.ones(n)
nmasked = int(fraction * n)
for i in range(nmasked):
yval = 0.5 * (1 - np.cos(np.pi * float(i) / float(nmasked)))
mask[i] = yval
mask[n - i - 1] = yval
return mask | python | def cosinebell(n, fraction):
"""Return a cosine bell spanning n pixels, masking a fraction of pixels
Parameters
----------
n : int
Number of pixels.
fraction : float
Length fraction over which the data will be masked.
"""
mask = np.ones(n)
nmasked = int(fraction * n)
for i in range(nmasked):
yval = 0.5 * (1 - np.cos(np.pi * float(i) / float(nmasked)))
mask[i] = yval
mask[n - i - 1] = yval
return mask | [
"def",
"cosinebell",
"(",
"n",
",",
"fraction",
")",
":",
"mask",
"=",
"np",
".",
"ones",
"(",
"n",
")",
"nmasked",
"=",
"int",
"(",
"fraction",
"*",
"n",
")",
"for",
"i",
"in",
"range",
"(",
"nmasked",
")",
":",
"yval",
"=",
"0.5",
"*",
"(",
"1",
"-",
"np",
".",
"cos",
"(",
"np",
".",
"pi",
"*",
"float",
"(",
"i",
")",
"/",
"float",
"(",
"nmasked",
")",
")",
")",
"mask",
"[",
"i",
"]",
"=",
"yval",
"mask",
"[",
"n",
"-",
"i",
"-",
"1",
"]",
"=",
"yval",
"return",
"mask"
] | Return a cosine bell spanning n pixels, masking a fraction of pixels
Parameters
----------
n : int
Number of pixels.
fraction : float
Length fraction over which the data will be masked. | [
"Return",
"a",
"cosine",
"bell",
"spanning",
"n",
"pixels",
"masking",
"a",
"fraction",
"of",
"pixels"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/crosscorrelation.py#L83-L102 | train |
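A quick numeric check of the cosine bell above: with n=10 and fraction=0.2, two samples at each end are tapered while the central samples stay at 1.

import numpy as np

mask = np.ones(10)
nmasked = int(0.2 * 10)  # 2
for i in range(nmasked):
    yval = 0.5 * (1 - np.cos(np.pi * float(i) / float(nmasked)))
    mask[i] = yval
    mask[10 - i - 1] = yval
# mask -> [0.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.0]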
guaix-ucm/numina | numina/array/wavecalib/crosscorrelation.py | convolve_comb_lines | def convolve_comb_lines(lines_wave, lines_flux, sigma,
crpix1, crval1, cdelt1, naxis1):
"""Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel.
"""
# generate wavelengths for output spectrum
xwave = crval1 + (np.arange(naxis1) + 1 - crpix1) * cdelt1
# initialize output spectrum
spectrum = np.zeros(naxis1)
# convolve each line
for wave, flux in zip(lines_wave, lines_flux):
sp_tmp = gauss_box_model(x=xwave, amplitude=flux, mean=wave,
stddev=sigma)
spectrum += sp_tmp
return xwave, spectrum | python | def convolve_comb_lines(lines_wave, lines_flux, sigma,
crpix1, crval1, cdelt1, naxis1):
"""Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel.
"""
# generate wavelengths for output spectrum
xwave = crval1 + (np.arange(naxis1) + 1 - crpix1) * cdelt1
# initialize output spectrum
spectrum = np.zeros(naxis1)
# convolve each line
for wave, flux in zip(lines_wave, lines_flux):
sp_tmp = gauss_box_model(x=xwave, amplitude=flux, mean=wave,
stddev=sigma)
spectrum += sp_tmp
return xwave, spectrum | [
"def",
"convolve_comb_lines",
"(",
"lines_wave",
",",
"lines_flux",
",",
"sigma",
",",
"crpix1",
",",
"crval1",
",",
"cdelt1",
",",
"naxis1",
")",
":",
"# generate wavelengths for output spectrum",
"xwave",
"=",
"crval1",
"+",
"(",
"np",
".",
"arange",
"(",
"naxis1",
")",
"+",
"1",
"-",
"crpix1",
")",
"*",
"cdelt1",
"# initialize output spectrum",
"spectrum",
"=",
"np",
".",
"zeros",
"(",
"naxis1",
")",
"# convolve each line",
"for",
"wave",
",",
"flux",
"in",
"zip",
"(",
"lines_wave",
",",
"lines_flux",
")",
":",
"sp_tmp",
"=",
"gauss_box_model",
"(",
"x",
"=",
"xwave",
",",
"amplitude",
"=",
"flux",
",",
"mean",
"=",
"wave",
",",
"stddev",
"=",
"sigma",
")",
"spectrum",
"+=",
"sp_tmp",
"return",
"xwave",
",",
"spectrum"
] | Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel. | [
"Convolve",
"a",
"set",
"of",
"lines",
"of",
"known",
"wavelengths",
"and",
"flux",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/crosscorrelation.py#L105-L147 | train |
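A hedged sketch of building a synthetic comb spectrum in the same spirit; a plain Gaussian profile stands in for numina's gauss_box_model, and the line list and WCS values are invented for illustration.

import numpy as np

lines_wave = np.array([4046.56, 4358.33, 5460.74])  # illustrative wavelengths (Angstrom)
lines_flux = np.array([1.0, 0.8, 1.2])
crpix1, crval1, cdelt1, naxis1 = 1.0, 4000.0, 1.0, 2000
sigma = 2.0

xwave = crval1 + (np.arange(naxis1) + 1 - crpix1) * cdelt1
spectrum = np.zeros(naxis1)
for wave, flux in zip(lines_wave, lines_flux):
    spectrum += flux * np.exp(-0.5 * ((xwave - wave) / sigma) ** 2)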
inspirehep/inspire-schemas | inspire_schemas/builders/references.py | _split_refextract_authors_str | def _split_refextract_authors_str(authors_str):
"""Extract author names out of refextract authors output."""
author_seq = (x.strip() for x in RE_SPLIT_AUTH.split(authors_str) if x)
res = []
current = ''
for author in author_seq:
if not isinstance(author, six.text_type):
author = six.text_type(author.decode('utf8', 'ignore'))
# First clean the token.
author = re.sub(r'\(|\)', '', author, re.U)
# Names usually start with characters.
author = re.sub(r'^[\W\d]+', '', author, re.U)
# Names should end with characters or dot.
author = re.sub(r'[^.\w]+$', '', author, re.U)
# If we have initials join them with the previous token.
if RE_INITIALS_ONLY.match(author):
current += ', ' + author.strip().replace('. ', '.')
else:
if current:
res.append(current)
current = author
# Add last element.
if current:
res.append(current)
# Manual filterings that we don't want to add in regular expressions since
# it would make them more complex.
# * ed might sneak in
# * many legacy refs look like 'X. and Somebody E.'
# * might miss lowercase initials
filters = [
lambda a: a == 'ed',
lambda a: a.startswith(','),
lambda a: len(a) == 1
]
res = [r for r in res if all(not f(r) for f in filters)]
return res | python | def _split_refextract_authors_str(authors_str):
"""Extract author names out of refextract authors output."""
author_seq = (x.strip() for x in RE_SPLIT_AUTH.split(authors_str) if x)
res = []
current = ''
for author in author_seq:
if not isinstance(author, six.text_type):
author = six.text_type(author.decode('utf8', 'ignore'))
# First clean the token.
author = re.sub(r'\(|\)', '', author, re.U)
# Names usually start with characters.
author = re.sub(r'^[\W\d]+', '', author, re.U)
# Names should end with characters or dot.
author = re.sub(r'[^.\w]+$', '', author, re.U)
# If we have initials join them with the previous token.
if RE_INITIALS_ONLY.match(author):
current += ', ' + author.strip().replace('. ', '.')
else:
if current:
res.append(current)
current = author
# Add last element.
if current:
res.append(current)
# Manual filterings that we don't want to add in regular expressions since
# it would make them more complex.
# * ed might sneak in
# * many legacy refs look like 'X. and Somebody E.'
# * might miss lowercase initials
filters = [
lambda a: a == 'ed',
lambda a: a.startswith(','),
lambda a: len(a) == 1
]
res = [r for r in res if all(not f(r) for f in filters)]
return res | [
"def",
"_split_refextract_authors_str",
"(",
"authors_str",
")",
":",
"author_seq",
"=",
"(",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"RE_SPLIT_AUTH",
".",
"split",
"(",
"authors_str",
")",
"if",
"x",
")",
"res",
"=",
"[",
"]",
"current",
"=",
"''",
"for",
"author",
"in",
"author_seq",
":",
"if",
"not",
"isinstance",
"(",
"author",
",",
"six",
".",
"text_type",
")",
":",
"author",
"=",
"six",
".",
"text_type",
"(",
"author",
".",
"decode",
"(",
"'utf8'",
",",
"'ignore'",
")",
")",
"# First clean the token.",
"author",
"=",
"re",
".",
"sub",
"(",
"r'\\(|\\)'",
",",
"''",
",",
"author",
",",
"re",
".",
"U",
")",
"# Names usually start with characters.",
"author",
"=",
"re",
".",
"sub",
"(",
"r'^[\\W\\d]+'",
",",
"''",
",",
"author",
",",
"re",
".",
"U",
")",
"# Names should end with characters or dot.",
"author",
"=",
"re",
".",
"sub",
"(",
"r'[^.\\w]+$'",
",",
"''",
",",
"author",
",",
"re",
".",
"U",
")",
"# If we have initials join them with the previous token.",
"if",
"RE_INITIALS_ONLY",
".",
"match",
"(",
"author",
")",
":",
"current",
"+=",
"', '",
"+",
"author",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'. '",
",",
"'.'",
")",
"else",
":",
"if",
"current",
":",
"res",
".",
"append",
"(",
"current",
")",
"current",
"=",
"author",
"# Add last element.",
"if",
"current",
":",
"res",
".",
"append",
"(",
"current",
")",
"# Manual filterings that we don't want to add in regular expressions since",
"# it would make them more complex.",
"# * ed might sneak in",
"# * many legacy refs look like 'X. and Somebody E.'",
"# * might miss lowercase initials",
"filters",
"=",
"[",
"lambda",
"a",
":",
"a",
"==",
"'ed'",
",",
"lambda",
"a",
":",
"a",
".",
"startswith",
"(",
"','",
")",
",",
"lambda",
"a",
":",
"len",
"(",
"a",
")",
"==",
"1",
"]",
"res",
"=",
"[",
"r",
"for",
"r",
"in",
"res",
"if",
"all",
"(",
"not",
"f",
"(",
"r",
")",
"for",
"f",
"in",
"filters",
")",
"]",
"return",
"res"
] | Extract author names out of refextract authors output. | [
"Extract",
"author",
"names",
"out",
"of",
"refextract",
"authors",
"output",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/references.py#L57-L97 | train |
inspirehep/inspire-schemas | inspire_schemas/builders/references.py | ReferenceBuilder._set_publication_info_field | def _set_publication_info_field(self, field_name, value):
"""Put a value in the publication info of the reference."""
self._ensure_reference_field('publication_info', {})
self.obj['reference']['publication_info'][field_name] = value | python | def _set_publication_info_field(self, field_name, value):
"""Put a value in the publication info of the reference."""
self._ensure_reference_field('publication_info', {})
self.obj['reference']['publication_info'][field_name] = value | [
"def",
"_set_publication_info_field",
"(",
"self",
",",
"field_name",
",",
"value",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'publication_info'",
",",
"{",
"}",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'publication_info'",
"]",
"[",
"field_name",
"]",
"=",
"value"
] | Put a value in the publication info of the reference. | [
"Put",
"a",
"value",
"in",
"the",
"publication",
"info",
"of",
"the",
"reference",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/references.py#L148-L151 | train |
inspirehep/inspire-schemas | inspire_schemas/builders/references.py | ReferenceBuilder.set_pubnote | def set_pubnote(self, pubnote):
"""Parse pubnote and populate correct fields."""
if 'publication_info' in self.obj.get('reference', {}):
self.add_misc(u'Additional pubnote: {}'.format(pubnote))
return
if self.RE_VALID_PUBNOTE.match(pubnote):
pubnote = split_pubnote(pubnote)
pubnote = convert_old_publication_info_to_new([pubnote])[0]
self._ensure_reference_field('publication_info', pubnote)
else:
self.add_misc(pubnote) | python | def set_pubnote(self, pubnote):
"""Parse pubnote and populate correct fields."""
if 'publication_info' in self.obj.get('reference', {}):
self.add_misc(u'Additional pubnote: {}'.format(pubnote))
return
if self.RE_VALID_PUBNOTE.match(pubnote):
pubnote = split_pubnote(pubnote)
pubnote = convert_old_publication_info_to_new([pubnote])[0]
self._ensure_reference_field('publication_info', pubnote)
else:
self.add_misc(pubnote) | [
"def",
"set_pubnote",
"(",
"self",
",",
"pubnote",
")",
":",
"if",
"'publication_info'",
"in",
"self",
".",
"obj",
".",
"get",
"(",
"'reference'",
",",
"{",
"}",
")",
":",
"self",
".",
"add_misc",
"(",
"u'Additional pubnote: {}'",
".",
"format",
"(",
"pubnote",
")",
")",
"return",
"if",
"self",
".",
"RE_VALID_PUBNOTE",
".",
"match",
"(",
"pubnote",
")",
":",
"pubnote",
"=",
"split_pubnote",
"(",
"pubnote",
")",
"pubnote",
"=",
"convert_old_publication_info_to_new",
"(",
"[",
"pubnote",
"]",
")",
"[",
"0",
"]",
"self",
".",
"_ensure_reference_field",
"(",
"'publication_info'",
",",
"pubnote",
")",
"else",
":",
"self",
".",
"add_misc",
"(",
"pubnote",
")"
] | Parse pubnote and populate correct fields. | [
"Parse",
"pubnote",
"and",
"populate",
"correct",
"fields",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/references.py#L228-L239 | train |
inspirehep/inspire-schemas | inspire_schemas/builders/references.py | ReferenceBuilder._add_uid | def _add_uid(self, uid, skip_handle=False):
"""Add unique identifier in correct field.
The ``skip_handle`` flag is used when adding a uid through the add_url function
since urls can be easily confused with handle elements.
"""
# We might add None values from wherever. Kill them here.
uid = uid or ''
if is_arxiv(uid):
self._ensure_reference_field('arxiv_eprint', normalize_arxiv(uid))
elif idutils.is_doi(uid):
self._ensure_reference_field('dois', [])
self.obj['reference']['dois'].append(idutils.normalize_doi(uid))
elif idutils.is_handle(uid) and not skip_handle:
self._ensure_reference_field('persistent_identifiers', [])
self.obj['reference']['persistent_identifiers'].append({
'schema': 'HDL',
'value': idutils.normalize_handle(uid),
})
elif idutils.is_urn(uid):
self._ensure_reference_field('persistent_identifiers', [])
self.obj['reference']['persistent_identifiers'].append({
'schema': 'URN',
'value': uid,
})
elif self.RE_VALID_CNUM.match(uid):
self._ensure_reference_field('publication_info', {})
self.obj['reference']['publication_info']['cnum'] = uid
elif is_cds_url(uid):
self._ensure_reference_field('external_system_identifiers', [])
self.obj['reference']['external_system_identifiers'].append({
'schema': 'CDS',
'value': extract_cds_id(uid),
})
elif is_ads_url(uid):
self._ensure_reference_field('external_system_identifiers', [])
self.obj['reference']['external_system_identifiers'].append({
'schema': 'ADS',
'value': extract_ads_id(uid),
})
else:
# ``idutils.is_isbn`` is too strict in what it accepts.
try:
isbn = str(ISBN(uid))
self._ensure_reference_field('isbn', {})
self.obj['reference']['isbn'] = isbn
except Exception:
raise ValueError('Unrecognized uid type') | python | def _add_uid(self, uid, skip_handle=False):
"""Add unique identifier in correct field.
The ``skip_handle`` flag is used when adding a uid through the add_url function
since urls can be easily confused with handle elements.
"""
# We might add None values from wherever. Kill them here.
uid = uid or ''
if is_arxiv(uid):
self._ensure_reference_field('arxiv_eprint', normalize_arxiv(uid))
elif idutils.is_doi(uid):
self._ensure_reference_field('dois', [])
self.obj['reference']['dois'].append(idutils.normalize_doi(uid))
elif idutils.is_handle(uid) and not skip_handle:
self._ensure_reference_field('persistent_identifiers', [])
self.obj['reference']['persistent_identifiers'].append({
'schema': 'HDL',
'value': idutils.normalize_handle(uid),
})
elif idutils.is_urn(uid):
self._ensure_reference_field('persistent_identifiers', [])
self.obj['reference']['persistent_identifiers'].append({
'schema': 'URN',
'value': uid,
})
elif self.RE_VALID_CNUM.match(uid):
self._ensure_reference_field('publication_info', {})
self.obj['reference']['publication_info']['cnum'] = uid
elif is_cds_url(uid):
self._ensure_reference_field('external_system_identifiers', [])
self.obj['reference']['external_system_identifiers'].append({
'schema': 'CDS',
'value': extract_cds_id(uid),
})
elif is_ads_url(uid):
self._ensure_reference_field('external_system_identifiers', [])
self.obj['reference']['external_system_identifiers'].append({
'schema': 'ADS',
'value': extract_ads_id(uid),
})
else:
# ``idutils.is_isbn`` is too strict in what it accepts.
try:
isbn = str(ISBN(uid))
self._ensure_reference_field('isbn', {})
self.obj['reference']['isbn'] = isbn
except Exception:
raise ValueError('Unrecognized uid type') | [
"def",
"_add_uid",
"(",
"self",
",",
"uid",
",",
"skip_handle",
"=",
"False",
")",
":",
"# We might add None values from wherever. Kill them here.",
"uid",
"=",
"uid",
"or",
"''",
"if",
"is_arxiv",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'arxiv_eprint'",
",",
"normalize_arxiv",
"(",
"uid",
")",
")",
"elif",
"idutils",
".",
"is_doi",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'dois'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'dois'",
"]",
".",
"append",
"(",
"idutils",
".",
"normalize_doi",
"(",
"uid",
")",
")",
"elif",
"idutils",
".",
"is_handle",
"(",
"uid",
")",
"and",
"not",
"skip_handle",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'persistent_identifiers'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'persistent_identifiers'",
"]",
".",
"append",
"(",
"{",
"'schema'",
":",
"'HDL'",
",",
"'value'",
":",
"idutils",
".",
"normalize_handle",
"(",
"uid",
")",
",",
"}",
")",
"elif",
"idutils",
".",
"is_urn",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'persistent_identifiers'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'persistent_identifiers'",
"]",
".",
"append",
"(",
"{",
"'schema'",
":",
"'URN'",
",",
"'value'",
":",
"uid",
",",
"}",
")",
"elif",
"self",
".",
"RE_VALID_CNUM",
".",
"match",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'publication_info'",
",",
"{",
"}",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'publication_info'",
"]",
"[",
"'cnum'",
"]",
"=",
"uid",
"elif",
"is_cds_url",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'external_system_identifiers'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'external_system_identifiers'",
"]",
".",
"append",
"(",
"{",
"'schema'",
":",
"'CDS'",
",",
"'value'",
":",
"extract_cds_id",
"(",
"uid",
")",
",",
"}",
")",
"elif",
"is_ads_url",
"(",
"uid",
")",
":",
"self",
".",
"_ensure_reference_field",
"(",
"'external_system_identifiers'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'external_system_identifiers'",
"]",
".",
"append",
"(",
"{",
"'schema'",
":",
"'ADS'",
",",
"'value'",
":",
"extract_ads_id",
"(",
"uid",
")",
",",
"}",
")",
"else",
":",
"# ``idutils.is_isbn`` is too strict in what it accepts.",
"try",
":",
"isbn",
"=",
"str",
"(",
"ISBN",
"(",
"uid",
")",
")",
"self",
".",
"_ensure_reference_field",
"(",
"'isbn'",
",",
"{",
"}",
")",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'isbn'",
"]",
"=",
"isbn",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"'Unrecognized uid type'",
")"
] | Add unique identifier in correct field.
The ``skip_handle`` flag is used when adding a uid through the add_url function
since urls can be easily confused with handle elements. | [
"Add",
"unique",
"identifier",
"in",
"correct",
"field",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/references.py#L271-L318 | train |
inspirehep/inspire-schemas | inspire_schemas/builders/references.py | ReferenceBuilder.set_page_artid | def set_page_artid(self, page_start=None, page_end=None, artid=None):
"""Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page
"""
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
if page_start:
publication_info['page_start'] = page_start
if page_end:
publication_info['page_end'] = page_end
if artid:
publication_info['artid'] = artid | python | def set_page_artid(self, page_start=None, page_end=None, artid=None):
"""Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page
"""
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
if page_start:
publication_info['page_start'] = page_start
if page_end:
publication_info['page_end'] = page_end
if artid:
publication_info['artid'] = artid | [
"def",
"set_page_artid",
"(",
"self",
",",
"page_start",
"=",
"None",
",",
"page_end",
"=",
"None",
",",
"artid",
"=",
"None",
")",
":",
"if",
"page_end",
"and",
"not",
"page_start",
":",
"raise",
"ValueError",
"(",
"'End_page provided without start_page'",
")",
"self",
".",
"_ensure_reference_field",
"(",
"'publication_info'",
",",
"{",
"}",
")",
"publication_info",
"=",
"self",
".",
"obj",
"[",
"'reference'",
"]",
"[",
"'publication_info'",
"]",
"if",
"page_start",
":",
"publication_info",
"[",
"'page_start'",
"]",
"=",
"page_start",
"if",
"page_end",
":",
"publication_info",
"[",
"'page_end'",
"]",
"=",
"page_end",
"if",
"artid",
":",
"publication_info",
"[",
"'artid'",
"]",
"=",
"artid"
] | Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when no start_page given for an end_page | [
"Add",
"artid",
"start",
"end",
"pages",
"to",
"publication",
"info",
"of",
"a",
"reference",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/references.py#L336-L357 | train |
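An illustrative call sequence for set_page_artid, assuming ReferenceBuilder can be instantiated without arguments; the page and artid values are made up.

builder = ReferenceBuilder()  # assumed zero-argument constructor
builder.set_page_artid(page_start='123', page_end='130')
builder.set_page_artid(artid='032004')
# builder.set_page_artid(page_end='130') would raise
# ValueError('End_page provided without start_page')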
pylp/pylp | pylp/utils/glob.py | separate_globs | def separate_globs(globs):
"""Separate include and exclude globs."""
exclude = []
include = []
for path in globs:
if path.startswith("!"):
exclude.append(path[1:])
else:
include.append(path)
return (exclude, include) | python | def separate_globs(globs):
"""Separate include and exclude globs."""
exclude = []
include = []
for path in globs:
if path.startswith("!"):
exclude.append(path[1:])
else:
include.append(path)
return (exclude, include) | [
"def",
"separate_globs",
"(",
"globs",
")",
":",
"exclude",
"=",
"[",
"]",
"include",
"=",
"[",
"]",
"for",
"path",
"in",
"globs",
":",
"if",
"path",
".",
"startswith",
"(",
"\"!\"",
")",
":",
"exclude",
".",
"append",
"(",
"path",
"[",
"1",
":",
"]",
")",
"else",
":",
"include",
".",
"append",
"(",
"path",
")",
"return",
"(",
"exclude",
",",
"include",
")"
] | Separate include and exclude globs. | [
"Separate",
"include",
"and",
"exclude",
"globs",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/glob.py#L15-L26 | train |
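A worked example of the split above: patterns prefixed with '!' go to the exclude list (with the '!' stripped), everything else to the include list.

globs = ['src/**/*.py', '!src/**/test_*.py', 'docs/*.md']
exclude, include = separate_globs(globs)
# exclude -> ['src/**/test_*.py']
# include -> ['src/**/*.py', 'docs/*.md']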
pylp/pylp | pylp/utils/glob.py | parse_glob | def parse_glob(path, included):
"""Parse a glob."""
files = glob.glob(path, recursive=True)
array = []
for file in files:
file = os.path.abspath(file)
if file not in included:
array.append(file)
included += array
return array | python | def parse_glob(path, included):
"""Parse a glob."""
files = glob.glob(path, recursive=True)
array = []
for file in files:
file = os.path.abspath(file)
if file not in included:
array.append(file)
included += array
return array | [
"def",
"parse_glob",
"(",
"path",
",",
"included",
")",
":",
"files",
"=",
"glob",
".",
"glob",
"(",
"path",
",",
"recursive",
"=",
"True",
")",
"array",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"file",
")",
"if",
"file",
"not",
"in",
"included",
":",
"array",
".",
"append",
"(",
"file",
")",
"included",
"+=",
"array",
"return",
"array"
] | Parse a glob. | [
"Parse",
"a",
"glob",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/glob.py#L30-L42 | train |
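A usage sketch for parse_glob; the patterns are illustrative. The included list is both read and extended in place, so repeated calls with overlapping patterns never return the same absolute path twice.

included = []
first = parse_glob('src/**/*.py', included)     # all matches, as absolute paths
second = parse_glob('src/pylp/*.py', included)  # only files not already in `included`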
pylp/pylp | pylp/utils/glob.py | find_base | def find_base(path):
"""Find the base of a glob."""
result = _pattern.match(path)
if result:
base = result.group(0)
else:
base = "./"
if base.endswith('/') or base.endswith('\\'):
return os.path.abspath(base)
else:
return os.path.dirname(os.path.abspath(base)) | python | def find_base(path):
"""Find the base of a glob."""
result = _pattern.match(path)
if result:
base = result.group(0)
else:
base = "./"
if base.endswith('/') or base.endswith('\\'):
return os.path.abspath(base)
else:
return os.path.dirname(os.path.abspath(base)) | [
"def",
"find_base",
"(",
"path",
")",
":",
"result",
"=",
"_pattern",
".",
"match",
"(",
"path",
")",
"if",
"result",
":",
"base",
"=",
"result",
".",
"group",
"(",
"0",
")",
"else",
":",
"base",
"=",
"\"./\"",
"if",
"base",
".",
"endswith",
"(",
"'/'",
")",
"or",
"base",
".",
"endswith",
"(",
"'\\\\'",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"base",
")",
"else",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"base",
")",
")"
] | Find the base of a glob. | [
"Find",
"the",
"base",
"of",
"a",
"glob",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/glob.py#L49-L61 | train |
guaix-ucm/numina | numina/array/wavecalib/check_wlcalib.py | fun_wv | def fun_wv(xchannel, crpix1, crval1, cdelt1):
"""Compute wavelengths from channels.
The wavelength calibration is provided through the usual parameters
CRPIX1, CRVAL1 and CDELT1.
Parameters
----------
xchannel : numpy array
Input channels where the wavelengths will be evaluated.
crpix1: float
CRPIX1 keyword.
crval1: float
CRVAL1 keyword.
cdelt1: float
CDELT1 keyword.
Returns
-------
wv : numpy array
Computed wavelengths
"""
wv = crval1 + (xchannel - crpix1) * cdelt1
return wv | python | def fun_wv(xchannel, crpix1, crval1, cdelt1):
"""Compute wavelengths from channels.
The wavelength calibration is provided through the usual parameters
CRPIX1, CRVAL1 and CDELT1.
Parameters
----------
xchannel : numpy array
Input channels where the wavelengths will be evaluated.
crpix1: float
CRPIX1 keyword.
crval1: float
CRVAL1 keyword.
cdelt1: float
CDELT1 keyword.
Returns
-------
wv : numpy array
Computed wavelengths
"""
wv = crval1 + (xchannel - crpix1) * cdelt1
return wv | [
"def",
"fun_wv",
"(",
"xchannel",
",",
"crpix1",
",",
"crval1",
",",
"cdelt1",
")",
":",
"wv",
"=",
"crval1",
"+",
"(",
"xchannel",
"-",
"crpix1",
")",
"*",
"cdelt1",
"return",
"wv"
] | Compute wavelengths from channels.
The wavelength calibration is provided through the usual parameters
CRPIX1, CRVAL1 and CDELT1.
Parameters
----------
xchannel : numpy array
Input channels where the wavelengths will be evaluated.
crpix1: float
CRPIX1 keyword.
crval1: float
CRVAL1 keyword.
cdelt1: float
CDELT1 keyword.
Returns
-------
wv : numpy array
Computed wavelengths | [
"Compute",
"wavelengths",
"from",
"channels",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/check_wlcalib.py#L109-L133 | train |
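A worked example of the linear mapping above: with crpix1=1, crval1=4000 and cdelt1=2, channels 1, 2 and 3 map to 4000, 4002 and 4004.

import numpy as np

xchannel = np.array([1.0, 2.0, 3.0])
wv = 4000.0 + (xchannel - 1.0) * 2.0
# wv -> [4000., 4002., 4004.]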
guaix-ucm/numina | numina/array/wavecalib/check_wlcalib.py | update_poly_wlcalib | def update_poly_wlcalib(coeff_ini, coeff_residuals, naxis1_ini, debugplot):
"""Update wavelength calibration polynomial using the residuals fit.
The idea is to repeat the original fit using the information
previously computed with the function check_wlcalib_sp() in this
module.
Parameters
----------
coeff_ini : array like (floats)
Coefficients corresponding to the initial wavelength
calibration.
coeff_residuals: array like (floats)
Coefficients corresponding to the fit performed by the
function check_wlcalib_sp() in this module.
naxis1_ini : int
NAXIS1 in original spectrum employed to fit the initial
wavelength calibration.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
coeff_end : numpy array (floats)
Updated coefficients.
"""
# define initial wavelength calibration polynomial (use generic
# code valid for lists of numpy.arrays)
coeff = []
for fdum in coeff_ini:
coeff.append(fdum)
poly_ini = np.polynomial.Polynomial(coeff)
poldeg_wlcalib = len(coeff) - 1
# return initial polynomial when there is no need to compute an
# updated version
if len(coeff_residuals) == 0:
return poly_ini.coef
else:
if np.count_nonzero(poly_ini.coef) == 0:
return poly_ini.coef
# define polynomial corresponding to the residuals fit carried
# out by check_wlcalib_sp()
coeff = []
for fdum in coeff_residuals:
coeff.append(fdum)
poly_residuals = np.polynomial.Polynomial(coeff)
# define new points to be fitted
xfit = np.zeros(naxis1_ini)
yfit = np.zeros(naxis1_ini)
for i in range(naxis1_ini):
xfit[i] = float(i + 1)
wv_tmp = poly_ini(xfit[i])
yfit[i] = wv_tmp + poly_residuals(wv_tmp)
# fit to get the updated polynomial
if len(xfit) > poldeg_wlcalib:
poldeg_effective = poldeg_wlcalib
else:
poldeg_effective = len(xfit) - 1
poly_updated, ydum = polfit_residuals(
x=xfit,
y=yfit,
deg=poldeg_effective,
debugplot=debugplot
)
# return coefficients of updated polynomial
return poly_updated.coef | python | def update_poly_wlcalib(coeff_ini, coeff_residuals, naxis1_ini, debugplot):
"""Update wavelength calibration polynomial using the residuals fit.
The idea is to repeat the original fit using the information
previously computed with the function check_wlcalib_sp() in this
module.
Parameters
----------
coeff_ini : array like (floats)
Coefficients corresponding to the initial wavelength
calibration.
coeff_residuals: array like (floats)
Coefficients corresponding to the fit performed by the
function check_wlcalib_sp() in this module.
naxis1_ini : int
NAXIS1 in original spectrum employed to fit the initial
wavelength calibration.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
coeff_end : numpy array (floats)
Updated coefficients.
"""
# define initial wavelength calibration polynomial (use generic
# code valid for lists of numpy.arrays)
coeff = []
for fdum in coeff_ini:
coeff.append(fdum)
poly_ini = np.polynomial.Polynomial(coeff)
poldeg_wlcalib = len(coeff) - 1
# return initial polynomial when there is no need to compute an
# updated version
if len(coeff_residuals) == 0:
return poly_ini.coef
else:
if np.count_nonzero(poly_ini.coef) == 0:
return poly_ini.coef
# define polynomial corresponding to the residuals fit carried
# out by check_wlcalib_sp()
coeff = []
for fdum in coeff_residuals:
coeff.append(fdum)
poly_residuals = np.polynomial.Polynomial(coeff)
# define new points to be fitted
xfit = np.zeros(naxis1_ini)
yfit = np.zeros(naxis1_ini)
for i in range(naxis1_ini):
xfit[i] = float(i + 1)
wv_tmp = poly_ini(xfit[i])
yfit[i] = wv_tmp + poly_residuals(wv_tmp)
# fit to get the updated polynomial
if len(xfit) > poldeg_wlcalib:
poldeg_effective = poldeg_wlcalib
else:
poldeg_effective = len(xfit) - 1
poly_updated, ydum = polfit_residuals(
x=xfit,
y=yfit,
deg=poldeg_effective,
debugplot=debugplot
)
# return coefficients of updated polynomial
return poly_updated.coef | [
"def",
"update_poly_wlcalib",
"(",
"coeff_ini",
",",
"coeff_residuals",
",",
"naxis1_ini",
",",
"debugplot",
")",
":",
"# define initial wavelength calibration polynomial (use generic",
"# code valid for lists of numpy.arrays)",
"coeff",
"=",
"[",
"]",
"for",
"fdum",
"in",
"coeff_ini",
":",
"coeff",
".",
"append",
"(",
"fdum",
")",
"poly_ini",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"coeff",
")",
"poldeg_wlcalib",
"=",
"len",
"(",
"coeff",
")",
"-",
"1",
"# return initial polynomial when there is no need to compute an",
"# updated version",
"if",
"len",
"(",
"coeff_residuals",
")",
"==",
"0",
":",
"return",
"poly_ini",
".",
"coef",
"else",
":",
"if",
"np",
".",
"count_nonzero",
"(",
"poly_ini",
".",
"coef",
")",
"==",
"0",
":",
"return",
"poly_ini",
".",
"coef",
"# define polynomial corresponding to the residuals fit carried",
"# out by check_wlcalib_sp()",
"coeff",
"=",
"[",
"]",
"for",
"fdum",
"in",
"coeff_residuals",
":",
"coeff",
".",
"append",
"(",
"fdum",
")",
"poly_residuals",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"coeff",
")",
"# define new points to be fitted",
"xfit",
"=",
"np",
".",
"zeros",
"(",
"naxis1_ini",
")",
"yfit",
"=",
"np",
".",
"zeros",
"(",
"naxis1_ini",
")",
"for",
"i",
"in",
"range",
"(",
"naxis1_ini",
")",
":",
"xfit",
"[",
"i",
"]",
"=",
"float",
"(",
"i",
"+",
"1",
")",
"wv_tmp",
"=",
"poly_ini",
"(",
"xfit",
"[",
"i",
"]",
")",
"yfit",
"[",
"i",
"]",
"=",
"wv_tmp",
"+",
"poly_residuals",
"(",
"wv_tmp",
")",
"# fit to get the updated polynomial",
"if",
"len",
"(",
"xfit",
")",
">",
"poldeg_wlcalib",
":",
"poldeg_effective",
"=",
"poldeg_wlcalib",
"else",
":",
"poldeg_effective",
"=",
"len",
"(",
"xfit",
")",
"-",
"1",
"poly_updated",
",",
"ydum",
"=",
"polfit_residuals",
"(",
"x",
"=",
"xfit",
",",
"y",
"=",
"yfit",
",",
"deg",
"=",
"poldeg_effective",
",",
"debugplot",
"=",
"debugplot",
")",
"# return coefficients of updated polynomial",
"return",
"poly_updated",
".",
"coef"
] | Update wavelength calibration polynomial using the residuals fit.
The idea is to repeat the original fit using the information
previously computed with the function check_wlcalib_sp() in this
module.
Parameters
----------
coeff_ini : array like (floats)
Coefficients corresponding to the initial wavelength
calibration.
coeff_residuals: array like (floats)
Coefficients corresponding to the fit performed by the
function check_wlcalib_sp() in this module.
naxis1_ini : int
NAXIS1 in original spectrum employed to fit the initial
wavelength calibration.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
coeff_end : numpy array (floats)
Updated coefficients. | [
"Update",
"wavelength",
"calibration",
"polynomial",
"using",
"the",
"residuals",
"fit",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/check_wlcalib.py#L620-L693 | train |
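A sketch of the composition that the refit above reproduces, using plain NumPy polynomials; the coefficients and array length are illustrative. The updated calibration is essentially a polynomial fit to wv(x) = p_ini(x) + p_res(p_ini(x)) evaluated at every pixel.

import numpy as np

poly_ini = np.polynomial.Polynomial([4000.0, 2.0])   # initial calibration (illustrative)
poly_res = np.polynomial.Polynomial([0.5, -1.0e-4])  # residuals fit, as from check_wlcalib_sp
naxis1 = 2048

xfit = np.arange(1, naxis1 + 1, dtype=float)
wv_ini = poly_ini(xfit)
yfit = wv_ini + poly_res(wv_ini)                     # corrected wavelengths
poly_updated = np.polynomial.Polynomial.fit(xfit, yfit, deg=1).convert()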
guaix-ucm/numina | numina/core/metarecipes.py | generate_docs | def generate_docs(klass):
"""Add documentation to generated classes"""
import numina.types.datatype
attrh = ('Attributes\n'
'----------\n')
doc = getattr(klass, '__doc__', None)
if doc is None or doc == '':
doc = "%s documentation." % klass.__name__
if len(klass.stored()):
doc = doc + '\n\n' + attrh
skeys = sorted(klass.stored().keys())
for key in skeys:
y = klass.stored()[key]
if isinstance(y, Requirement):
modo = 'requirement'
elif isinstance(y, Result):
modo = 'product'
else:
modo = ""
if y.type.isproduct():
tipo = y.type.__class__.__name__
elif isinstance(y.type, numina.types.datatype.PlainPythonType):
tipo = y.type.internal_type.__name__
else:
tipo = y.type.__class__.__name__
if y.optional:
if y.default_value():
modo = "%s, optional, default=%s" % (modo, y.default)
else:
modo = "%s, optional" % (modo,)
descript = y.description
if descript:
field = "%s : %s, %s\n %s\n" % (key, tipo, modo, descript)
else:
field = "%s : %s, %s\n" % (key, tipo, modo)
doc = doc + field
klass.__doc__ = doc
return klass | python | def generate_docs(klass):
"""Add documentation to generated classes"""
import numina.types.datatype
attrh = ('Attributes\n'
'----------\n')
doc = getattr(klass, '__doc__', None)
if doc is None or doc == '':
doc = "%s documentation." % klass.__name__
if len(klass.stored()):
doc = doc + '\n\n' + attrh
skeys = sorted(klass.stored().keys())
for key in skeys:
y = klass.stored()[key]
if isinstance(y, Requirement):
modo = 'requirement'
elif isinstance(y, Result):
modo = 'product'
else:
modo = ""
if y.type.isproduct():
tipo = y.type.__class__.__name__
elif isinstance(y.type, numina.types.datatype.PlainPythonType):
tipo = y.type.internal_type.__name__
else:
tipo = y.type.__class__.__name__
if y.optional:
if y.default_value():
modo = "%s, optional, default=%s" % (modo, y.default)
else:
modo = "%s, optional" % (modo,)
descript = y.description
if descript:
field = "%s : %s, %s\n %s\n" % (key, tipo, modo, descript)
else:
field = "%s : %s, %s\n" % (key, tipo, modo)
doc = doc + field
klass.__doc__ = doc
return klass | [
"def",
"generate_docs",
"(",
"klass",
")",
":",
"import",
"numina",
".",
"types",
".",
"datatype",
"attrh",
"=",
"(",
"'Attributes\\n'",
"'----------\\n'",
")",
"doc",
"=",
"getattr",
"(",
"klass",
",",
"'__doc__'",
",",
"None",
")",
"if",
"doc",
"is",
"None",
"or",
"doc",
"==",
"''",
":",
"doc",
"=",
"\"%s documentation.\"",
"%",
"klass",
".",
"__name__",
"if",
"len",
"(",
"klass",
".",
"stored",
"(",
")",
")",
":",
"doc",
"=",
"doc",
"+",
"'\\n\\n'",
"+",
"attrh",
"skeys",
"=",
"sorted",
"(",
"klass",
".",
"stored",
"(",
")",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"skeys",
":",
"y",
"=",
"klass",
".",
"stored",
"(",
")",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"y",
",",
"Requirement",
")",
":",
"modo",
"=",
"'requirement'",
"elif",
"isinstance",
"(",
"y",
",",
"Result",
")",
":",
"modo",
"=",
"'product'",
"else",
":",
"modo",
"=",
"\"\"",
"if",
"y",
".",
"type",
".",
"isproduct",
"(",
")",
":",
"tipo",
"=",
"y",
".",
"type",
".",
"__class__",
".",
"__name__",
"elif",
"isinstance",
"(",
"y",
".",
"type",
",",
"numina",
".",
"types",
".",
"datatype",
".",
"PlainPythonType",
")",
":",
"tipo",
"=",
"y",
".",
"type",
".",
"internal_type",
".",
"__name__",
"else",
":",
"tipo",
"=",
"y",
".",
"type",
".",
"__class__",
".",
"__name__",
"if",
"y",
".",
"optional",
":",
"if",
"y",
".",
"default_value",
"(",
")",
":",
"modo",
"=",
"\"%s, optional, default=%s\"",
"%",
"(",
"modo",
",",
"y",
".",
"default",
")",
"else",
":",
"modo",
"=",
"\"%s, optional\"",
"%",
"(",
"modo",
",",
")",
"descript",
"=",
"y",
".",
"description",
"if",
"descript",
":",
"field",
"=",
"\"%s : %s, %s\\n %s\\n\"",
"%",
"(",
"key",
",",
"tipo",
",",
"modo",
",",
"descript",
")",
"else",
":",
"field",
"=",
"\"%s : %s, %s\\n\"",
"%",
"(",
"key",
",",
"tipo",
",",
"modo",
")",
"doc",
"=",
"doc",
"+",
"field",
"klass",
".",
"__doc__",
"=",
"doc",
"return",
"klass"
] | Add documentation to generated classes | [
"Add",
"documentation",
"to",
"generated",
"classes"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/metarecipes.py#L83-L129 | train |
xflr6/bitsets | bitsets/integers.py | reinverted | def reinverted(n, r):
"""Integer with reversed and inverted bits of n assuming bit length r.
>>> reinverted(1, 6)
31
>>> [reinverted(x, 6) for x in [7, 11, 13, 14, 19, 21, 22, 25, 26, 28]]
[7, 11, 19, 35, 13, 21, 37, 25, 41, 49]
"""
result = 0
r = 1 << (r - 1)
while n:
if not n & 1:
result |= r
r >>= 1
n >>= 1
if r:
result |= (r << 1) - 1
return result | python | def reinverted(n, r):
"""Integer with reversed and inverted bits of n assuming bit length r.
>>> reinverted(1, 6)
31
>>> [reinverted(x, 6) for x in [7, 11, 13, 14, 19, 21, 22, 25, 26, 28]]
[7, 11, 19, 35, 13, 21, 37, 25, 41, 49]
"""
result = 0
r = 1 << (r - 1)
while n:
if not n & 1:
result |= r
r >>= 1
n >>= 1
if r:
result |= (r << 1) - 1
return result | [
"def",
"reinverted",
"(",
"n",
",",
"r",
")",
":",
"result",
"=",
"0",
"r",
"=",
"1",
"<<",
"(",
"r",
"-",
"1",
")",
"while",
"n",
":",
"if",
"not",
"n",
"&",
"1",
":",
"result",
"|=",
"r",
"r",
">>=",
"1",
"n",
">>=",
"1",
"if",
"r",
":",
"result",
"|=",
"(",
"r",
"<<",
"1",
")",
"-",
"1",
"return",
"result"
] | Integer with reversed and inverted bits of n assuming bit length r.
>>> reinverted(1, 6)
31
>>> [reinverted(x, 6) for x in [7, 11, 13, 14, 19, 21, 22, 25, 26, 28]]
[7, 11, 19, 35, 13, 21, 37, 25, 41, 49] | [
"Integer",
"with",
"reversed",
"and",
"inverted",
"bits",
"of",
"n",
"assuming",
"bit",
"length",
"r",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/integers.py#L35-L53 | train |
xflr6/bitsets | bitsets/integers.py | rank | def rank(items, sequence=string.ascii_lowercase):
"""Rank items from sequence in colexicographical order.
>>> [rank(i) for i in ('', 'a', 'b', 'ab', 'c')]
[0, 1, 2, 3, 4]
>>> rank('spam')
299009
"""
items = set(items)
return sum(1 << i for i, s in enumerate(sequence) if s in items) | python | def rank(items, sequence=string.ascii_lowercase):
"""Rank items from sequence in colexicographical order.
>>> [rank(i) for i in ('', 'a', 'b', 'ab', 'c')]
[0, 1, 2, 3, 4]
>>> rank('spam')
299009
"""
items = set(items)
return sum(1 << i for i, s in enumerate(sequence) if s in items) | [
"def",
"rank",
"(",
"items",
",",
"sequence",
"=",
"string",
".",
"ascii_lowercase",
")",
":",
"items",
"=",
"set",
"(",
"items",
")",
"return",
"sum",
"(",
"1",
"<<",
"i",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"sequence",
")",
"if",
"s",
"in",
"items",
")"
] | Rank items from sequence in colexicographical order.
>>> [rank(i) for i in ('', 'a', 'b', 'ab', 'c')]
[0, 1, 2, 3, 4]
>>> rank('spam')
299009 | [
"Rank",
"items",
"from",
"sequence",
"in",
"colexicographical",
"order",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/integers.py#L56-L66 | train |
xflr6/bitsets | bitsets/integers.py | unrank | def unrank(n, sequence=string.ascii_lowercase):
"""Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's']
"""
return list(map(sequence.__getitem__, indexes(n))) | python | def unrank(n, sequence=string.ascii_lowercase):
"""Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's']
"""
return list(map(sequence.__getitem__, indexes(n))) | [
"def",
"unrank",
"(",
"n",
",",
"sequence",
"=",
"string",
".",
"ascii_lowercase",
")",
":",
"return",
"list",
"(",
"map",
"(",
"sequence",
".",
"__getitem__",
",",
"indexes",
"(",
"n",
")",
")",
")"
] | Unrank n from sequence in colexicographical order.
>>> [''.join(unrank(i)) for i in range(8)]
['', 'a', 'b', 'ab', 'c', 'ac', 'bc', 'abc']
>>> unrank(299009)
['a', 'm', 'p', 's'] | [
"Unrank",
"n",
"from",
"sequence",
"in",
"colexicographical",
"order",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/integers.py#L69-L78 | train |
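
A minimal round-trip sketch for the reinverted / rank / unrank helpers in the three bitsets records above; the import path follows the record path bitsets/integers.py and the expected values are the ones quoted in the docstrings.

from bitsets.integers import rank, unrank, reinverted

n = rank('spam')                          # members -> integer bit set
assert n == 299009                        # value from the rank() docstring
assert unrank(n) == ['a', 'm', 'p', 's']  # integer bit set -> sorted members
assert reinverted(1, 6) == 31             # reverse + invert the low 6 bits of 1
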
guaix-ucm/numina | numina/array/background.py | background_estimator | def background_estimator(bdata):
"""Estimate the background in a 2D array"""
crowded = False
std = numpy.std(bdata)
std0 = std
mean = bdata.mean()
while True:
prep = len(bdata)
numpy.clip(bdata, mean - 3 * std, mean + 3 * std, out=bdata)
if prep == len(bdata):
if std < 0.8 * std0:
crowded = True
break
std = numpy.std(bdata)
mean = bdata.mean()
if crowded:
median = numpy.median(bdata)
mean = bdata.mean()
std = bdata.std()
return 2.5 * median - 1.5 * mean, std
return bdata.mean(), bdata.std() | python | def background_estimator(bdata):
"""Estimate the background in a 2D array"""
crowded = False
std = numpy.std(bdata)
std0 = std
mean = bdata.mean()
while True:
prep = len(bdata)
numpy.clip(bdata, mean - 3 * std, mean + 3 * std, out=bdata)
if prep == len(bdata):
if std < 0.8 * std0:
crowded = True
break
std = numpy.std(bdata)
mean = bdata.mean()
if crowded:
median = numpy.median(bdata)
mean = bdata.mean()
std = bdata.std()
return 2.5 * median - 1.5 * mean, std
return bdata.mean(), bdata.std() | [
"def",
"background_estimator",
"(",
"bdata",
")",
":",
"crowded",
"=",
"False",
"std",
"=",
"numpy",
".",
"std",
"(",
"bdata",
")",
"std0",
"=",
"std",
"mean",
"=",
"bdata",
".",
"mean",
"(",
")",
"while",
"True",
":",
"prep",
"=",
"len",
"(",
"bdata",
")",
"numpy",
".",
"clip",
"(",
"bdata",
",",
"mean",
"-",
"3",
"*",
"std",
",",
"mean",
"+",
"3",
"*",
"std",
",",
"out",
"=",
"bdata",
")",
"if",
"prep",
"==",
"len",
"(",
"bdata",
")",
":",
"if",
"std",
"<",
"0.8",
"*",
"std0",
":",
"crowded",
"=",
"True",
"break",
"std",
"=",
"numpy",
".",
"std",
"(",
"bdata",
")",
"mean",
"=",
"bdata",
".",
"mean",
"(",
")",
"if",
"crowded",
":",
"median",
"=",
"numpy",
".",
"median",
"(",
"bdata",
")",
"mean",
"=",
"bdata",
".",
"mean",
"(",
")",
"std",
"=",
"bdata",
".",
"std",
"(",
")",
"return",
"2.5",
"*",
"median",
"-",
"1.5",
"*",
"mean",
",",
"std",
"return",
"bdata",
".",
"mean",
"(",
")",
",",
"bdata",
".",
"std",
"(",
")"
] | Estimate the background in a 2D array | [
"Estimate",
"the",
"background",
"in",
"a",
"2D",
"array"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/background.py#L37-L61 | train |
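
A small usage sketch for background_estimator above; the import path follows the record path numina/array/background.py, the synthetic patch is illustrative, and the input is copied because the function clips it in place.

import numpy
from numina.array.background import background_estimator

patch = numpy.random.normal(loc=100.0, scale=5.0, size=(64, 64))  # flat sky + noise
level, sigma = background_estimator(patch.copy())  # copy: numpy.clip(..., out=bdata) mutates the input
# level and sigma should come out close to 100 and 5 for this uncrowded synthetic patch
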
guaix-ucm/numina | numina/array/background.py | create_background_map | def create_background_map(data, bsx, bsy):
"""Create a background map with a given mesh size"""
sx, sy = data.shape
mx = sx // bsx
my = sy // bsy
comp = []
rms = []
# Rows
sp = numpy.split(data, numpy.arange(bsx, sx, bsx), axis=0)
for s in sp:
# Columns
rp = numpy.split(s, numpy.arange(bsy, sy, bsy), axis=1)
for r in rp:
b, r = background_estimator(r)
comp.append(b)
rms.append(r)
# Reconstructed image
z = numpy.array(comp)
z.shape = (mx, my)
# median filter
ndfilter.median_filter(z, size=(3, 3), output=z)
# Interpolate to the original size
new = _interpolation(z, sx, sy, mx, my)
# Interpolate the rms
z = numpy.array(rms)
z.shape = (mx, my)
nrms = _interpolation(z, sx, sy, mx, my)
return new, nrms | python | def create_background_map(data, bsx, bsy):
"""Create a background map with a given mesh size"""
sx, sy = data.shape
mx = sx // bsx
my = sy // bsy
comp = []
rms = []
# Rows
sp = numpy.split(data, numpy.arange(bsx, sx, bsx), axis=0)
for s in sp:
# Columns
rp = numpy.split(s, numpy.arange(bsy, sy, bsy), axis=1)
for r in rp:
b, r = background_estimator(r)
comp.append(b)
rms.append(r)
# Reconstructed image
z = numpy.array(comp)
z.shape = (mx, my)
# median filter
ndfilter.median_filter(z, size=(3, 3), output=z)
# Interpolate to the original size
new = _interpolation(z, sx, sy, mx, my)
# Interpolate the rms
z = numpy.array(rms)
z.shape = (mx, my)
nrms = _interpolation(z, sx, sy, mx, my)
return new, nrms | [
"def",
"create_background_map",
"(",
"data",
",",
"bsx",
",",
"bsy",
")",
":",
"sx",
",",
"sy",
"=",
"data",
".",
"shape",
"mx",
"=",
"sx",
"//",
"bsx",
"my",
"=",
"sy",
"//",
"bsy",
"comp",
"=",
"[",
"]",
"rms",
"=",
"[",
"]",
"# Rows",
"sp",
"=",
"numpy",
".",
"split",
"(",
"data",
",",
"numpy",
".",
"arange",
"(",
"bsx",
",",
"sx",
",",
"bsx",
")",
",",
"axis",
"=",
"0",
")",
"for",
"s",
"in",
"sp",
":",
"# Columns",
"rp",
"=",
"numpy",
".",
"split",
"(",
"s",
",",
"numpy",
".",
"arange",
"(",
"bsy",
",",
"sy",
",",
"bsy",
")",
",",
"axis",
"=",
"1",
")",
"for",
"r",
"in",
"rp",
":",
"b",
",",
"r",
"=",
"background_estimator",
"(",
"r",
")",
"comp",
".",
"append",
"(",
"b",
")",
"rms",
".",
"append",
"(",
"r",
")",
"# Reconstructed image",
"z",
"=",
"numpy",
".",
"array",
"(",
"comp",
")",
"z",
".",
"shape",
"=",
"(",
"mx",
",",
"my",
")",
"# median filter",
"ndfilter",
".",
"median_filter",
"(",
"z",
",",
"size",
"=",
"(",
"3",
",",
"3",
")",
",",
"output",
"=",
"z",
")",
"# Interpolate to the original size",
"new",
"=",
"_interpolation",
"(",
"z",
",",
"sx",
",",
"sy",
",",
"mx",
",",
"my",
")",
"# Interpolate the rms",
"z",
"=",
"numpy",
".",
"array",
"(",
"rms",
")",
"z",
".",
"shape",
"=",
"(",
"mx",
",",
"my",
")",
"nrms",
"=",
"_interpolation",
"(",
"z",
",",
"sx",
",",
"sy",
",",
"mx",
",",
"my",
")",
"return",
"new",
",",
"nrms"
] | Create a background map with a given mesh size | [
"Create",
"a",
"background",
"map",
"with",
"a",
"given",
"mesh",
"size"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/background.py#L64-L95 | train |
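
A usage sketch for create_background_map above, again assuming the numina.array.background import path; the mesh sizes relate to the image through the sx // bsx integer division in the code.

import numpy
from numina.array.background import create_background_map

image = numpy.random.normal(loc=200.0, scale=3.0, size=(256, 256))
background, rms = create_background_map(image, 32, 32)   # 32x32-pixel mesh cells
# per the in-code comments, both maps are interpolated back to the original image size
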
ckan/losser | losser/losser.py | _read_columns_file | def _read_columns_file(f):
"""Return the list of column queries read from the given JSON file.
:param f: path to the file to read
:type f: string
:rtype: list of dicts
"""
try:
columns = json.loads(open(f, 'r').read(),
object_pairs_hook=collections.OrderedDict)
except Exception as err:
raise InvalidColumnsFileError(
"There was an error while reading {0}: {1}".format(f, err))
# Options are not supported yet:
if '__options' in columns:
del columns['__options']
return columns | python | def _read_columns_file(f):
"""Return the list of column queries read from the given JSON file.
:param f: path to the file to read
:type f: string
:rtype: list of dicts
"""
try:
columns = json.loads(open(f, 'r').read(),
object_pairs_hook=collections.OrderedDict)
except Exception as err:
raise InvalidColumnsFileError(
"There was an error while reading {0}: {1}".format(f, err))
# Options are not supported yet:
if '__options' in columns:
del columns['__options']
return columns | [
"def",
"_read_columns_file",
"(",
"f",
")",
":",
"try",
":",
"columns",
"=",
"json",
".",
"loads",
"(",
"open",
"(",
"f",
",",
"'r'",
")",
".",
"read",
"(",
")",
",",
"object_pairs_hook",
"=",
"collections",
".",
"OrderedDict",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"InvalidColumnsFileError",
"(",
"\"There was an error while reading {0}: {1}\"",
".",
"format",
"(",
"f",
",",
"err",
")",
")",
"# Options are not supported yet:",
"if",
"'__options'",
"in",
"columns",
":",
"del",
"columns",
"[",
"'__options'",
"]",
"return",
"columns"
] | Return the list of column queries read from the given JSON file.
:param f: path to the file to read
:type f: string
:rtype: list of dicts | [
"Return",
"the",
"list",
"of",
"column",
"queries",
"read",
"from",
"the",
"given",
"JSON",
"file",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/losser.py#L23-L43 | train |
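
A sketch of the kind of columns file _read_columns_file above consumes; the file name and column specs are illustrative, and the spec keys mirror the keyword arguments of query() shown further down in this section. The module itself targets Python 2 (basestring, cStringIO).

import json
from losser.losser import _read_columns_file   # import path per the record

columns_spec = {
    "Title": {"pattern_path": "^title$", "unique": True},
    "Author": {"pattern": "^author$", "case_sensitive": False},   # "pattern" is normalised to "pattern_path" by table()
}
with open("columns.json", "w") as f:
    json.dump(columns_spec, f, indent=4)

columns = _read_columns_file("columns.json")   # OrderedDict of column specs; a top-level "__options" key would be dropped
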
ckan/losser | losser/losser.py | _table_to_csv | def _table_to_csv(table_):
"""Return the given table converted to a CSV string.
:param table: the table to convert
:type table: list of OrderedDicts each with the same keys in the same
order
:rtype: UTF8-encoded, CSV-formatted string
"""
f = cStringIO.StringIO()
try:
_write_csv(f, table_)
return f.getvalue()
finally:
f.close() | python | def _table_to_csv(table_):
"""Return the given table converted to a CSV string.
:param table: the table to convert
:type table: list of OrderedDicts each with the same keys in the same
order
:rtype: UTF8-encoded, CSV-formatted string
"""
f = cStringIO.StringIO()
try:
_write_csv(f, table_)
return f.getvalue()
finally:
f.close() | [
"def",
"_table_to_csv",
"(",
"table_",
")",
":",
"f",
"=",
"cStringIO",
".",
"StringIO",
"(",
")",
"try",
":",
"_write_csv",
"(",
"f",
",",
"table_",
")",
"return",
"f",
".",
"getvalue",
"(",
")",
"finally",
":",
"f",
".",
"close",
"(",
")"
] | Return the given table converted to a CSV string.
:param table: the table to convert
:type table: list of OrderedDicts each with the same keys in the same
order
:rtype: UTF8-encoded, CSV-formatted string | [
"Return",
"the",
"given",
"table",
"converted",
"to",
"a",
"CSV",
"string",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/losser.py#L76-L91 | train |
ckan/losser | losser/losser.py | table | def table(dicts, columns, csv=False, pretty=False):
"""Query a list of dicts with a list of queries and return a table.
A "table" is a list of OrderedDicts each having the same keys in the same
order.
:param dicts: the list of input dicts
:type dicts: list of dicts
:param columns: the list of column query dicts, or the path to a JSON file
containing the list of column query dicts
:type columns: list of dicts, or string
:param csv: return a UTF8-encoded, CSV-formatted string instead of a list
of dicts
:type csv: bool
:rtype: list of dicts, or CSV string
"""
# Optionally read columns from file.
if isinstance(columns, basestring):
columns = _read_columns_file(columns)
# Either "pattern" or "pattern_path" (but not both) is allowed in the
# columns.json file, but "pattern" gets normalised to "pattern_path" here.
for column in columns.values():
if "pattern" in column:
assert "pattern_path" not in column, (
'A column must have either a "pattern" or a "pattern_path"'
"but not both")
column["pattern_path"] = column["pattern"]
del column["pattern"]
table_ = []
for d in dicts:
row = collections.OrderedDict() # The row we'll return in the table.
for column_title, column_spec in columns.items():
if not column_spec.get('return_multiple_columns', False):
row[column_title] = query(dict_=d, **column_spec)
else:
multiple_columns = query(dict_=d, **column_spec)
for k, v in multiple_columns.items():
row[k] = v
table_.append(row)
if pretty:
# Return a pretty-printed string (looks like a nice table when printed
# to stdout).
return tabulate.tabulate(table_, tablefmt="grid", headers="keys")
elif csv:
# Return a string of CSV-formatted text.
return _table_to_csv(table_)
else:
# Return a list of dicts.
return table_ | python | def table(dicts, columns, csv=False, pretty=False):
"""Query a list of dicts with a list of queries and return a table.
A "table" is a list of OrderedDicts each having the same keys in the same
order.
:param dicts: the list of input dicts
:type dicts: list of dicts
:param columns: the list of column query dicts, or the path to a JSON file
containing the list of column query dicts
:type columns: list of dicts, or string
:param csv: return a UTF8-encoded, CSV-formatted string instead of a list
of dicts
:type csv: bool
:rtype: list of dicts, or CSV string
"""
# Optionally read columns from file.
if isinstance(columns, basestring):
columns = _read_columns_file(columns)
# Either "pattern" or "pattern_path" (but not both) is allowed in the
# columns.json file, but "pattern" gets normalised to "pattern_path" here.
for column in columns.values():
if "pattern" in column:
assert "pattern_path" not in column, (
'A column must have either a "pattern" or a "pattern_path"'
"but not both")
column["pattern_path"] = column["pattern"]
del column["pattern"]
table_ = []
for d in dicts:
row = collections.OrderedDict() # The row we'll return in the table.
for column_title, column_spec in columns.items():
if not column_spec.get('return_multiple_columns', False):
row[column_title] = query(dict_=d, **column_spec)
else:
multiple_columns = query(dict_=d, **column_spec)
for k, v in multiple_columns.items():
row[k] = v
table_.append(row)
if pretty:
# Return a pretty-printed string (looks like a nice table when printed
# to stdout).
return tabulate.tabulate(table_, tablefmt="grid", headers="keys")
elif csv:
# Return a string of CSV-formatted text.
return _table_to_csv(table_)
else:
# Return a list of dicts.
return table_ | [
"def",
"table",
"(",
"dicts",
",",
"columns",
",",
"csv",
"=",
"False",
",",
"pretty",
"=",
"False",
")",
":",
"# Optionally read columns from file.",
"if",
"isinstance",
"(",
"columns",
",",
"basestring",
")",
":",
"columns",
"=",
"_read_columns_file",
"(",
"columns",
")",
"# Either \"pattern\" or \"pattern_path\" (but not both) is allowed in the",
"# columns.json file, but \"pattern\" gets normalised to \"pattern_path\" here.",
"for",
"column",
"in",
"columns",
".",
"values",
"(",
")",
":",
"if",
"\"pattern\"",
"in",
"column",
":",
"assert",
"\"pattern_path\"",
"not",
"in",
"column",
",",
"(",
"'A column must have either a \"pattern\" or a \"pattern_path\"'",
"\"but not both\"",
")",
"column",
"[",
"\"pattern_path\"",
"]",
"=",
"column",
"[",
"\"pattern\"",
"]",
"del",
"column",
"[",
"\"pattern\"",
"]",
"table_",
"=",
"[",
"]",
"for",
"d",
"in",
"dicts",
":",
"row",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"# The row we'll return in the table.",
"for",
"column_title",
",",
"column_spec",
"in",
"columns",
".",
"items",
"(",
")",
":",
"if",
"not",
"column_spec",
".",
"get",
"(",
"'return_multiple_columns'",
",",
"False",
")",
":",
"row",
"[",
"column_title",
"]",
"=",
"query",
"(",
"dict_",
"=",
"d",
",",
"*",
"*",
"column_spec",
")",
"else",
":",
"multiple_columns",
"=",
"query",
"(",
"dict_",
"=",
"d",
",",
"*",
"*",
"column_spec",
")",
"for",
"k",
",",
"v",
"in",
"multiple_columns",
".",
"items",
"(",
")",
":",
"row",
"[",
"k",
"]",
"=",
"v",
"table_",
".",
"append",
"(",
"row",
")",
"if",
"pretty",
":",
"# Return a pretty-printed string (looks like a nice table when printed",
"# to stdout).",
"return",
"tabulate",
".",
"tabulate",
"(",
"table_",
",",
"tablefmt",
"=",
"\"grid\"",
",",
"headers",
"=",
"\"keys\"",
")",
"elif",
"csv",
":",
"# Return a string of CSV-formatted text.",
"return",
"_table_to_csv",
"(",
"table_",
")",
"else",
":",
"# Return a list of dicts.",
"return",
"table_"
] | Query a list of dicts with a list of queries and return a table.
A "table" is a list of OrderedDicts each having the same keys in the same
order.
:param dicts: the list of input dicts
:type dicts: list of dicts
:param columns: the list of column query dicts, or the path to a JSON file
containing the list of column query dicts
:type columns: list of dicts, or string
:param csv: return a UTF8-encoded, CSV-formatted string instead of a list
of dicts
:type csv: bool
:rtype: list of dicts, or CSV string | [
"Query",
"a",
"list",
"of",
"dicts",
"with",
"a",
"list",
"of",
"queries",
"and",
"return",
"a",
"table",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/losser.py#L94-L151 | train |
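
A usage sketch for table() above; the input dicts and column titles are made up, the import path follows the record, and since the module uses basestring and cStringIO it assumes a Python 2 interpreter.

from losser.losser import table

datasets = [
    {"title": "Rainfall 2023", "author": {"name": "Met Office"}},
    {"title": "Air quality", "author": {"name": "City Lab"}},
]
columns = {
    "Title": {"pattern_path": "^title$", "unique": True},
    "Author": {"pattern_path": ["^author$", "^name$"]},   # descend into the author sub-dict
}
csv_text = table(datasets, columns, csv=True)   # or pretty=True for a tabulate grid
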
ckan/losser | losser/losser.py | query | def query(pattern_path, dict_, max_length=None, strip=False,
case_sensitive=False, unique=False, deduplicate=False,
string_transformations=None, hyperlink=False,
return_multiple_columns=False):
"""Query the given dict with the given pattern path and return the result.
The ``pattern_path`` is a either a single regular expression string or a
list of regex strings that will be matched against the keys of the dict and
its subdicts to find the value(s) in the dict to return.
The returned result is either a single value (None, "foo", 42, False...)
or (if the pattern path matched multiple values in the dict) a list of
values.
If the dict contains sub-lists or sub-dicts values from these will be
flattened into a simple flat list to be returned.
"""
if string_transformations is None:
string_transformations = []
if max_length:
string_transformations.append(lambda x: x[:max_length])
if hyperlink:
string_transformations.append(
lambda x: '=HYPERLINK("{0}")'.format(x))
if isinstance(pattern_path, basestring):
pattern_path = [pattern_path]
# Copy the pattern_path because we're going to modify it which can be
# unexpected and confusing to user code.
original_pattern_path = pattern_path
pattern_path = pattern_path[:]
# We're going to be popping strings off the end of the pattern path
# (because Python lists don't come with a convenient pop-from-front method)
# so we need the list in reverse order.
pattern_path.reverse()
result = _process_object(pattern_path, dict_,
string_transformations=string_transformations,
strip=strip, case_sensitive=case_sensitive,
return_multiple_columns=return_multiple_columns)
if not result:
return None # Empty lists finally get turned into None.
elif isinstance(result, dict):
return _flatten(result)
elif len(result) == 1:
return result[0] # One-item lists just get turned into the item.
else:
if unique:
msg = "pattern_path: {0}\n\n".format(original_pattern_path)
msg = msg + pprint.pformat(dict_)
raise UniqueError(msg)
if deduplicate:
# Deduplicate the list while maintaining order.
new_result = []
for item in result:
if item not in new_result:
new_result.append(item)
result = new_result
return result | python | def query(pattern_path, dict_, max_length=None, strip=False,
case_sensitive=False, unique=False, deduplicate=False,
string_transformations=None, hyperlink=False,
return_multiple_columns=False):
"""Query the given dict with the given pattern path and return the result.
The ``pattern_path`` is a either a single regular expression string or a
list of regex strings that will be matched against the keys of the dict and
its subdicts to find the value(s) in the dict to return.
The returned result is either a single value (None, "foo", 42, False...)
or (if the pattern path matched multiple values in the dict) a list of
values.
If the dict contains sub-lists or sub-dicts values from these will be
flattened into a simple flat list to be returned.
"""
if string_transformations is None:
string_transformations = []
if max_length:
string_transformations.append(lambda x: x[:max_length])
if hyperlink:
string_transformations.append(
lambda x: '=HYPERLINK("{0}")'.format(x))
if isinstance(pattern_path, basestring):
pattern_path = [pattern_path]
# Copy the pattern_path because we're going to modify it which can be
# unexpected and confusing to user code.
original_pattern_path = pattern_path
pattern_path = pattern_path[:]
# We're going to be popping strings off the end of the pattern path
# (because Python lists don't come with a convenient pop-from-front method)
# so we need the list in reverse order.
pattern_path.reverse()
result = _process_object(pattern_path, dict_,
string_transformations=string_transformations,
strip=strip, case_sensitive=case_sensitive,
return_multiple_columns=return_multiple_columns)
if not result:
return None # Empty lists finally get turned into None.
elif isinstance(result, dict):
return _flatten(result)
elif len(result) == 1:
return result[0] # One-item lists just get turned into the item.
else:
if unique:
msg = "pattern_path: {0}\n\n".format(original_pattern_path)
msg = msg + pprint.pformat(dict_)
raise UniqueError(msg)
if deduplicate:
# Deduplicate the list while maintaining order.
new_result = []
for item in result:
if item not in new_result:
new_result.append(item)
result = new_result
return result | [
"def",
"query",
"(",
"pattern_path",
",",
"dict_",
",",
"max_length",
"=",
"None",
",",
"strip",
"=",
"False",
",",
"case_sensitive",
"=",
"False",
",",
"unique",
"=",
"False",
",",
"deduplicate",
"=",
"False",
",",
"string_transformations",
"=",
"None",
",",
"hyperlink",
"=",
"False",
",",
"return_multiple_columns",
"=",
"False",
")",
":",
"if",
"string_transformations",
"is",
"None",
":",
"string_transformations",
"=",
"[",
"]",
"if",
"max_length",
":",
"string_transformations",
".",
"append",
"(",
"lambda",
"x",
":",
"x",
"[",
":",
"max_length",
"]",
")",
"if",
"hyperlink",
":",
"string_transformations",
".",
"append",
"(",
"lambda",
"x",
":",
"'=HYPERLINK(\"{0}\")'",
".",
"format",
"(",
"x",
")",
")",
"if",
"isinstance",
"(",
"pattern_path",
",",
"basestring",
")",
":",
"pattern_path",
"=",
"[",
"pattern_path",
"]",
"# Copy the pattern_path because we're going to modify it which can be",
"# unexpected and confusing to user code.",
"original_pattern_path",
"=",
"pattern_path",
"pattern_path",
"=",
"pattern_path",
"[",
":",
"]",
"# We're going to be popping strings off the end of the pattern path",
"# (because Python lists don't come with a convenient pop-from-front method)",
"# so we need the list in reverse order.",
"pattern_path",
".",
"reverse",
"(",
")",
"result",
"=",
"_process_object",
"(",
"pattern_path",
",",
"dict_",
",",
"string_transformations",
"=",
"string_transformations",
",",
"strip",
"=",
"strip",
",",
"case_sensitive",
"=",
"case_sensitive",
",",
"return_multiple_columns",
"=",
"return_multiple_columns",
")",
"if",
"not",
"result",
":",
"return",
"None",
"# Empty lists finally get turned into None.",
"elif",
"isinstance",
"(",
"result",
",",
"dict",
")",
":",
"return",
"_flatten",
"(",
"result",
")",
"elif",
"len",
"(",
"result",
")",
"==",
"1",
":",
"return",
"result",
"[",
"0",
"]",
"# One-item lists just get turned into the item.",
"else",
":",
"if",
"unique",
":",
"msg",
"=",
"\"pattern_path: {0}\\n\\n\"",
".",
"format",
"(",
"original_pattern_path",
")",
"msg",
"=",
"msg",
"+",
"pprint",
".",
"pformat",
"(",
"dict_",
")",
"raise",
"UniqueError",
"(",
"msg",
")",
"if",
"deduplicate",
":",
"# Deduplicate the list while maintaining order.",
"new_result",
"=",
"[",
"]",
"for",
"item",
"in",
"result",
":",
"if",
"item",
"not",
"in",
"new_result",
":",
"new_result",
".",
"append",
"(",
"item",
")",
"result",
"=",
"new_result",
"return",
"result"
] | Query the given dict with the given pattern path and return the result.
The ``pattern_path`` is either a single regular expression string or a
list of regex strings that will be matched against the keys of the dict and
its subdicts to find the value(s) in the dict to return.
The returned result is either a single value (None, "foo", 42, False...)
or (if the pattern path matched multiple values in the dict) a list of
values.
If the dict contains sub-lists or sub-dicts values from these will be
flattened into a simple flat list to be returned. | [
"Query",
"the",
"given",
"dict",
"with",
"the",
"given",
"pattern",
"path",
"and",
"return",
"the",
"result",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/losser.py#L154-L218 | train |
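
A sketch of calling query() above directly on one nested dict; the expected results follow the docstring (a single match is returned bare, several matches come back as a flattened list) and the data is illustrative. Python 2 era module, import path per the record.

from losser.losser import query

package = {
    "title": "Rainfall 2023",
    "resources": [{"format": "CSV"}, {"format": "JSON"}],
}
title = query("^title$", package, unique=True)         # expected: "Rainfall 2023"
formats = query(["^resources$", "^format$"], package)  # expected: ["CSV", "JSON"]
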
SUNCAT-Center/CatHub | cathub/query.py | get_reactions | def get_reactions(columns='all', n_results=20, write_db=False, **kwargs):
"""
Get reactions from server
Give key value strings as arguments
"""
if write_db or columns == 'all':
columns = all_columns['reactions']
queries = {}
for key, value in kwargs.items():
key = map_column_names(key)
if key == 'distinct':
if value in [True, 'True', 'true']:
queries.update({key: True})
continue
if isinstance(value, int) or isinstance(value, float):
queries.update({key: value})
else:
queries.update({key: '{0}'.format(value)})
subtables = []
if write_db:
subtables = ['reactionSystems', 'publication']
else:
subtables = []
data = query(table='reactions', subtables=subtables,
columns=columns,
n_results=n_results, queries=queries)
if not write_db:
return data
print('Writing result to Reactions.db')
unique_ids = []
for row in data['reactions']['edges']:
with CathubSQLite('Reactions.db') as db:
row = row['node']
key_values = {}
for key in all_columns['reactions']:
v = row[key]
# if isinstance(v, unicode):
# v = v.encode('utf-8')
try:
v = json.loads(v)
except BaseException:
pass
key_values[convert(key)] = v
ase_ids = {}
energy_corrections = {}
for row_rs in row['reactionSystems']:
if row_rs['name'] == 'N/A':
continue
ase_ids[row_rs['name']] = row_rs['aseId']
energy_corrections[row_rs['name']] = row_rs['energyCorrection']
if not ase_ids:
ase_ids = None
energy_corrections = None
else:
unique_ids += ase_ids.values()
key_values['ase_ids'] = ase_ids
key_values['energy_corrections'] = energy_corrections
# publications
pub_key_values = {}
row_p = row['publication']
for key in all_columns['publications']:
pub_key_values[convert(key)] = row_p[key]
db.write_publication(pub_key_values)
# reactions and reaction_systems
id = db.check(key_values['chemical_composition'],
key_values['reaction_energy'])
if id is None:
id = db.write(key_values)
else:
db.update(id, key_values)
if ase_ids is not None:
# Ase structures
with ase.db.connect('Reactions.db') as ase_db:
con = ase_db.connection
cur = con.cursor()
cur.execute('SELECT unique_id from systems;')
unique_ids0 = cur.fetchall()
unique_ids0 = [un[0] for un in unique_ids0]
unique_ids = [un for un in unique_ids if un not in unique_ids0]
for unique_id in list(set(unique_ids)):
# if ase_db.count('unique_id={}'.format(unique_id)) == 0:
atomsrow = get_atomsrow_by_id(unique_id)
ase_db.write(atomsrow)
print('Writing complete!')
return data | python | def get_reactions(columns='all', n_results=20, write_db=False, **kwargs):
"""
Get reactions from server
Give key value strings as arguments
"""
if write_db or columns == 'all':
columns = all_columns['reactions']
queries = {}
for key, value in kwargs.items():
key = map_column_names(key)
if key == 'distinct':
if value in [True, 'True', 'true']:
queries.update({key: True})
continue
if isinstance(value, int) or isinstance(value, float):
queries.update({key: value})
else:
queries.update({key: '{0}'.format(value)})
subtables = []
if write_db:
subtables = ['reactionSystems', 'publication']
else:
subtables = []
data = query(table='reactions', subtables=subtables,
columns=columns,
n_results=n_results, queries=queries)
if not write_db:
return data
print('Writing result to Reactions.db')
unique_ids = []
for row in data['reactions']['edges']:
with CathubSQLite('Reactions.db') as db:
row = row['node']
key_values = {}
for key in all_columns['reactions']:
v = row[key]
# if isinstance(v, unicode):
# v = v.encode('utf-8')
try:
v = json.loads(v)
except BaseException:
pass
key_values[convert(key)] = v
ase_ids = {}
energy_corrections = {}
for row_rs in row['reactionSystems']:
if row_rs['name'] == 'N/A':
continue
ase_ids[row_rs['name']] = row_rs['aseId']
energy_corrections[row_rs['name']] = row_rs['energyCorrection']
if not ase_ids:
ase_ids = None
energy_corrections = None
else:
unique_ids += ase_ids.values()
key_values['ase_ids'] = ase_ids
key_values['energy_corrections'] = energy_corrections
# publications
pub_key_values = {}
row_p = row['publication']
for key in all_columns['publications']:
pub_key_values[convert(key)] = row_p[key]
db.write_publication(pub_key_values)
# reactions and reaction_systems
id = db.check(key_values['chemical_composition'],
key_values['reaction_energy'])
if id is None:
id = db.write(key_values)
else:
db.update(id, key_values)
if ase_ids is not None:
# Ase structures
with ase.db.connect('Reactions.db') as ase_db:
con = ase_db.connection
cur = con.cursor()
cur.execute('SELECT unique_id from systems;')
unique_ids0 = cur.fetchall()
unique_ids0 = [un[0] for un in unique_ids0]
unique_ids = [un for un in unique_ids if un not in unique_ids0]
for unique_id in list(set(unique_ids)):
# if ase_db.count('unique_id={}'.format(unique_id)) == 0:
atomsrow = get_atomsrow_by_id(unique_id)
ase_db.write(atomsrow)
print('Writing complete!')
return data | [
"def",
"get_reactions",
"(",
"columns",
"=",
"'all'",
",",
"n_results",
"=",
"20",
",",
"write_db",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"write_db",
"or",
"columns",
"==",
"'all'",
":",
"columns",
"=",
"all_columns",
"[",
"'reactions'",
"]",
"queries",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"key",
"=",
"map_column_names",
"(",
"key",
")",
"if",
"key",
"==",
"'distinct'",
":",
"if",
"value",
"in",
"[",
"True",
",",
"'True'",
",",
"'true'",
"]",
":",
"queries",
".",
"update",
"(",
"{",
"key",
":",
"True",
"}",
")",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
"or",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"queries",
".",
"update",
"(",
"{",
"key",
":",
"value",
"}",
")",
"else",
":",
"queries",
".",
"update",
"(",
"{",
"key",
":",
"'{0}'",
".",
"format",
"(",
"value",
")",
"}",
")",
"subtables",
"=",
"[",
"]",
"if",
"write_db",
":",
"subtables",
"=",
"[",
"'reactionSystems'",
",",
"'publication'",
"]",
"else",
":",
"subtables",
"=",
"[",
"]",
"data",
"=",
"query",
"(",
"table",
"=",
"'reactions'",
",",
"subtables",
"=",
"subtables",
",",
"columns",
"=",
"columns",
",",
"n_results",
"=",
"n_results",
",",
"queries",
"=",
"queries",
")",
"if",
"not",
"write_db",
":",
"return",
"data",
"print",
"(",
"'Writing result to Reactions.db'",
")",
"unique_ids",
"=",
"[",
"]",
"for",
"row",
"in",
"data",
"[",
"'reactions'",
"]",
"[",
"'edges'",
"]",
":",
"with",
"CathubSQLite",
"(",
"'Reactions.db'",
")",
"as",
"db",
":",
"row",
"=",
"row",
"[",
"'node'",
"]",
"key_values",
"=",
"{",
"}",
"for",
"key",
"in",
"all_columns",
"[",
"'reactions'",
"]",
":",
"v",
"=",
"row",
"[",
"key",
"]",
"# if isinstance(v, unicode):",
"# v = v.encode('utf-8')",
"try",
":",
"v",
"=",
"json",
".",
"loads",
"(",
"v",
")",
"except",
"BaseException",
":",
"pass",
"key_values",
"[",
"convert",
"(",
"key",
")",
"]",
"=",
"v",
"ase_ids",
"=",
"{",
"}",
"energy_corrections",
"=",
"{",
"}",
"for",
"row_rs",
"in",
"row",
"[",
"'reactionSystems'",
"]",
":",
"if",
"row_rs",
"[",
"'name'",
"]",
"==",
"'N/A'",
":",
"continue",
"ase_ids",
"[",
"row_rs",
"[",
"'name'",
"]",
"]",
"=",
"row_rs",
"[",
"'aseId'",
"]",
"energy_corrections",
"[",
"row_rs",
"[",
"'name'",
"]",
"]",
"=",
"row_rs",
"[",
"'energyCorrection'",
"]",
"if",
"not",
"ase_ids",
":",
"ase_ids",
"=",
"None",
"energy_corrections",
"=",
"None",
"else",
":",
"unique_ids",
"+=",
"ase_ids",
".",
"values",
"(",
")",
"key_values",
"[",
"'ase_ids'",
"]",
"=",
"ase_ids",
"key_values",
"[",
"'energy_corrections'",
"]",
"=",
"ase_ids",
"# publications",
"pub_key_values",
"=",
"{",
"}",
"row_p",
"=",
"row",
"[",
"'publication'",
"]",
"for",
"key",
"in",
"all_columns",
"[",
"'publications'",
"]",
":",
"pub_key_values",
"[",
"convert",
"(",
"key",
")",
"]",
"=",
"row_p",
"[",
"key",
"]",
"db",
".",
"write_publication",
"(",
"pub_key_values",
")",
"# reactions and reaction_systems",
"id",
"=",
"db",
".",
"check",
"(",
"key_values",
"[",
"'chemical_composition'",
"]",
",",
"key_values",
"[",
"'reaction_energy'",
"]",
")",
"if",
"id",
"is",
"None",
":",
"id",
"=",
"db",
".",
"write",
"(",
"key_values",
")",
"else",
":",
"db",
".",
"update",
"(",
"id",
",",
"key_values",
")",
"if",
"ase_ids",
"is",
"not",
"None",
":",
"# Ase structures",
"with",
"ase",
".",
"db",
".",
"connect",
"(",
"'Reactions.db'",
")",
"as",
"ase_db",
":",
"con",
"=",
"ase_db",
".",
"connection",
"cur",
"=",
"con",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'SELECT unique_id from systems;'",
")",
"unique_ids0",
"=",
"cur",
".",
"fetchall",
"(",
")",
"unique_ids0",
"=",
"[",
"un",
"[",
"0",
"]",
"for",
"un",
"in",
"unique_ids0",
"]",
"unique_ids",
"=",
"[",
"un",
"for",
"un",
"in",
"unique_ids",
"if",
"un",
"not",
"in",
"unique_ids0",
"]",
"for",
"unique_id",
"in",
"list",
"(",
"set",
"(",
"unique_ids",
")",
")",
":",
"# if ase_db.count('unique_id={}'.format(unique_id)) == 0:",
"atomsrow",
"=",
"get_atomsrow_by_id",
"(",
"unique_id",
")",
"ase_db",
".",
"write",
"(",
"atomsrow",
")",
"print",
"(",
"'Writing complete!'",
")",
"return",
"data"
] | Get reactions from server
Give key value strings as arguments | [
"Get",
"reactions",
"from",
"server"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/query.py#L136-L231 | train |
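
A sketch of calling get_reactions() above against the Catalysis-Hub server; the keyword names and values are illustrative (they pass through map_column_names), while the data['reactions']['edges'] layout and the camelCase node keys are the ones the function itself iterates.

from cathub.query import get_reactions

data = get_reactions(n_results=5, reactants='CO', write_db=False)
for edge in data['reactions']['edges']:
    node = edge['node']
    print(node['chemicalComposition'], node['reactionEnergy'])
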
guaix-ucm/numina | numina/user/baserun.py | create_recipe_file_logger | def create_recipe_file_logger(logger, logfile, logformat):
"""Redirect Recipe log messages to a file."""
recipe_formatter = logging.Formatter(logformat)
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(recipe_formatter)
return fh | python | def create_recipe_file_logger(logger, logfile, logformat):
"""Redirect Recipe log messages to a file."""
recipe_formatter = logging.Formatter(logformat)
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(recipe_formatter)
return fh | [
"def",
"create_recipe_file_logger",
"(",
"logger",
",",
"logfile",
",",
"logformat",
")",
":",
"recipe_formatter",
"=",
"logging",
".",
"Formatter",
"(",
"logformat",
")",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"logfile",
",",
"mode",
"=",
"'w'",
")",
"fh",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"fh",
".",
"setFormatter",
"(",
"recipe_formatter",
")",
"return",
"fh"
] | Redirect Recipe log messages to a file. | [
"Redirect",
"Recipe",
"log",
"messages",
"to",
"a",
"file",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/baserun.py#L145-L151 | train |
guaix-ucm/numina | numina/user/baserun.py | run_recipe | def run_recipe(recipe, task, rinput, workenv, logger_control):
"""Recipe execution mode of numina."""
# Creating custom logger file
recipe_logger = logging.getLogger(logger_control['default'])
if logger_control['enabled']:
logfile = os.path.join(workenv.resultsdir, logger_control['logfile'])
logformat = logger_control['format']
_logger.debug('creating file logger %r from Recipe logger', logfile)
fh = create_recipe_file_logger(recipe_logger, logfile, logformat)
else:
fh = logging.NullHandler()
recipe_logger.addHandler(fh)
with working_directory(workenv.workdir):
try:
run_recipe_timed(task, recipe, rinput)
return task
finally:
recipe_logger.removeHandler(fh) | python | def run_recipe(recipe, task, rinput, workenv, logger_control):
"""Recipe execution mode of numina."""
# Creating custom logger file
recipe_logger = logging.getLogger(logger_control['default'])
if logger_control['enabled']:
logfile = os.path.join(workenv.resultsdir, logger_control['logfile'])
logformat = logger_control['format']
_logger.debug('creating file logger %r from Recipe logger', logfile)
fh = create_recipe_file_logger(recipe_logger, logfile, logformat)
else:
fh = logging.NullHandler()
recipe_logger.addHandler(fh)
with working_directory(workenv.workdir):
try:
run_recipe_timed(task, recipe, rinput)
return task
finally:
recipe_logger.removeHandler(fh) | [
"def",
"run_recipe",
"(",
"recipe",
",",
"task",
",",
"rinput",
",",
"workenv",
",",
"logger_control",
")",
":",
"# Creating custom logger file",
"recipe_logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger_control",
"[",
"'default'",
"]",
")",
"if",
"logger_control",
"[",
"'enabled'",
"]",
":",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"workenv",
".",
"resultsdir",
",",
"logger_control",
"[",
"'logfile'",
"]",
")",
"logformat",
"=",
"logger_control",
"[",
"'format'",
"]",
"_logger",
".",
"debug",
"(",
"'creating file logger %r from Recipe logger'",
",",
"logfile",
")",
"fh",
"=",
"create_recipe_file_logger",
"(",
"recipe_logger",
",",
"logfile",
",",
"logformat",
")",
"else",
":",
"fh",
"=",
"logging",
".",
"NullHandler",
"(",
")",
"recipe_logger",
".",
"addHandler",
"(",
"fh",
")",
"with",
"working_directory",
"(",
"workenv",
".",
"workdir",
")",
":",
"try",
":",
"run_recipe_timed",
"(",
"task",
",",
"recipe",
",",
"rinput",
")",
"return",
"task",
"finally",
":",
"recipe_logger",
".",
"removeHandler",
"(",
"fh",
")"
] | Recipe execution mode of numina. | [
"Recipe",
"execution",
"mode",
"of",
"numina",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/baserun.py#L154-L175 | train |
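
A sketch of the logger_control mapping consumed by run_recipe above; the four keys are exactly the ones read in the function body, the values are illustrative.

logger_control = dict(
    default='numina.recipes',       # name of the Recipe logger to capture
    enabled=True,                   # if False a NullHandler is used instead of a file
    logfile='processing.log',       # created inside workenv.resultsdir
    format='%(asctime)s - %(name)s %(levelname)s %(message)s',
)
# task = run_recipe(recipe, task, rinput, workenv, logger_control)
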
guaix-ucm/numina | numina/user/baserun.py | run_recipe_timed | def run_recipe_timed(task, recipe, rinput):
"""Run the recipe and count the time it takes."""
_logger.info('running recipe')
now1 = datetime.datetime.now()
task.state = 1
task.time_start = now1
#
result = recipe(rinput)
_logger.info('result: %r', result)
task.result = result
#
now2 = datetime.datetime.now()
task.state = 2
task.time_end = now2
return task | python | def run_recipe_timed(task, recipe, rinput):
"""Run the recipe and count the time it takes."""
_logger.info('running recipe')
now1 = datetime.datetime.now()
task.state = 1
task.time_start = now1
#
result = recipe(rinput)
_logger.info('result: %r', result)
task.result = result
#
now2 = datetime.datetime.now()
task.state = 2
task.time_end = now2
return task | [
"def",
"run_recipe_timed",
"(",
"task",
",",
"recipe",
",",
"rinput",
")",
":",
"_logger",
".",
"info",
"(",
"'running recipe'",
")",
"now1",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"task",
".",
"state",
"=",
"1",
"task",
".",
"time_start",
"=",
"now1",
"#",
"result",
"=",
"recipe",
"(",
"rinput",
")",
"_logger",
".",
"info",
"(",
"'result: %r'",
",",
"result",
")",
"task",
".",
"result",
"=",
"result",
"#",
"now2",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"task",
".",
"state",
"=",
"2",
"task",
".",
"time_end",
"=",
"now2",
"return",
"task"
] | Run the recipe and count the time it takes. | [
"Run",
"the",
"recipe",
"and",
"count",
"the",
"time",
"it",
"takes",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/baserun.py#L178-L192 | train |
kyzima-spb/flask-pony | flask_pony/__init__.py | stop_db_session | def stop_db_session(exc=None):
"""Stops the last db_session"""
# print('==> Stop session', type(exc))
if has_db_session():
exc_type = None
tb = None
if exc:
exc_type, exc, tb = get_exc_info(exc)
db_session.__exit__(exc_type, exc, tb) | python | def stop_db_session(exc=None):
"""Stops the last db_session"""
# print('==> Stop session', type(exc))
if has_db_session():
exc_type = None
tb = None
if exc:
exc_type, exc, tb = get_exc_info(exc)
db_session.__exit__(exc_type, exc, tb) | [
"def",
"stop_db_session",
"(",
"exc",
"=",
"None",
")",
":",
"# print('==> Stop session', type(exc))",
"if",
"has_db_session",
"(",
")",
":",
"exc_type",
"=",
"None",
"tb",
"=",
"None",
"if",
"exc",
":",
"exc_type",
",",
"exc",
",",
"tb",
"=",
"get_exc_info",
"(",
"exc",
")",
"db_session",
".",
"__exit__",
"(",
"exc_type",
",",
"exc",
",",
"tb",
")"
] | Stops the last db_session | [
"Stops",
"the",
"last",
"db_session"
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/__init__.py#L47-L58 | train |
pylp/pylp | pylp/lib/dest.py | get_path | def get_path(dest, file, cwd = None):
"""Get the writing path of a file."""
if callable(dest):
return dest(file)
if not cwd:
cwd = file.cwd
if not os.path.isabs(dest):
dest = os.path.join(cwd, dest)
relative = os.path.relpath(file.path, file.base)
return os.path.join(dest, relative) | python | def get_path(dest, file, cwd = None):
"""Get the writing path of a file."""
if callable(dest):
return dest(file)
if not cwd:
cwd = file.cwd
if not os.path.isabs(dest):
dest = os.path.join(cwd, dest)
relative = os.path.relpath(file.path, file.base)
return os.path.join(dest, relative) | [
"def",
"get_path",
"(",
"dest",
",",
"file",
",",
"cwd",
"=",
"None",
")",
":",
"if",
"callable",
"(",
"dest",
")",
":",
"return",
"dest",
"(",
"file",
")",
"if",
"not",
"cwd",
":",
"cwd",
"=",
"file",
".",
"cwd",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"dest",
")",
":",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"dest",
")",
"relative",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"file",
".",
"path",
",",
"file",
".",
"base",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"dest",
",",
"relative",
")"
] | Get the writing path of a file. | [
"Get",
"the",
"writing",
"path",
"of",
"a",
"file",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/dest.py#L23-L34 | train |
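
A sketch of how get_path() above resolves an output path; file here is just a stand-in namespace with the three attributes the function reads (path, base, cwd), and the POSIX-style paths are illustrative.

from types import SimpleNamespace
from pylp.lib.dest import get_path   # import path per the record

file = SimpleNamespace(path='/src/css/app/main.css', base='/src/css', cwd='/src')
assert get_path('build', file) == '/src/build/app/main.css'            # dest joined to cwd, base-relative part appended
assert get_path(lambda f: '/out/fixed.css', file) == '/out/fixed.css'  # a callable dest is used directly
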
pylp/pylp | pylp/lib/dest.py | write_file | def write_file(path, contents):
"""Write contents to a local file."""
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as file:
file.write(contents) | python | def write_file(path, contents):
"""Write contents to a local file."""
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as file:
file.write(contents) | [
"def",
"write_file",
"(",
"path",
",",
"contents",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"contents",
")"
] | Write contents to a local file. | [
"Write",
"contents",
"to",
"a",
"local",
"file",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/dest.py#L37-L41 | train |
SUNCAT-Center/CatHub | cathub/tools.py | get_pub_id | def get_pub_id(title, authors, year):
"construct publication id"
if len(title.split(' ')) > 1 \
and title.split(' ')[0].lower() in ['the', 'a']:
_first_word = title.split(' ')[1].split('_')[0]
else:
_first_word = title.split(' ')[0].split('_')[0]
pub_id = authors[0].split(',')[0].split(' ')[0] + \
_first_word + \
str(year)
return pub_id | python | def get_pub_id(title, authors, year):
"construct publication id"
if len(title.split(' ')) > 1 \
and title.split(' ')[0].lower() in ['the', 'a']:
_first_word = title.split(' ')[1].split('_')[0]
else:
_first_word = title.split(' ')[0].split('_')[0]
pub_id = authors[0].split(',')[0].split(' ')[0] + \
_first_word + \
str(year)
return pub_id | [
"def",
"get_pub_id",
"(",
"title",
",",
"authors",
",",
"year",
")",
":",
"if",
"len",
"(",
"title",
".",
"split",
"(",
"' '",
")",
")",
">",
"1",
"and",
"title",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"[",
"'the'",
",",
"'a'",
"]",
":",
"_first_word",
"=",
"title",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"else",
":",
"_first_word",
"=",
"title",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"pub_id",
"=",
"authors",
"[",
"0",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
"+",
"_first_word",
"+",
"str",
"(",
"year",
")",
"return",
"pub_id"
] | construct publication id | [
"construct",
"publication",
"id"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/tools.py#L4-L15 | train |
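
A worked example for get_pub_id() above with made-up publication metadata; a leading 'The' or 'A' in the title is skipped, and the id is first author surname + first significant title word + year.

from cathub.tools import get_pub_id   # import path per the record

pub_id = get_pub_id('The Role of Strain in Catalysis',
                    ['Doe, Jane', 'Smith, John'],
                    2019)
assert pub_id == 'DoeRole2019'
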
SUNCAT-Center/CatHub | cathub/tools.py | extract_atoms | def extract_atoms(molecule):
"""Return a string with all atoms in molecule"""
if molecule == '':
return molecule
try:
return float(molecule)
except BaseException:
pass
atoms = ''
if not molecule[0].isalpha():
i = 0
while not molecule[i].isalpha():
i += 1
prefactor = float(molecule[:i])
if prefactor < 0:
prefactor = abs(prefactor)
sign = '-'
else:
sign = ''
molecule = molecule[i:]
else:
prefactor = 1
sign = ''
for k in range(len(molecule)):
if molecule[k].isdigit():
for j in range(int(molecule[k]) - 1):
atoms += molecule[k - 1]
else:
atoms += molecule[k]
if prefactor % 1 == 0:
atoms *= int(prefactor)
elif prefactor % 1 == 0.5:
atoms_sort = sorted(atoms)
N = len(atoms)
atoms = ''
for n in range(N):
for m in range(int(prefactor - 0.5)):
atoms += atoms_sort[n]
if n % 2 == 0:
atoms += atoms_sort[n]
return sign + ''.join(sorted(atoms)) | python | def extract_atoms(molecule):
"""Return a string with all atoms in molecule"""
if molecule == '':
return molecule
try:
return float(molecule)
except BaseException:
pass
atoms = ''
if not molecule[0].isalpha():
i = 0
while not molecule[i].isalpha():
i += 1
prefactor = float(molecule[:i])
if prefactor < 0:
prefactor = abs(prefactor)
sign = '-'
else:
sign = ''
molecule = molecule[i:]
else:
prefactor = 1
sign = ''
for k in range(len(molecule)):
if molecule[k].isdigit():
for j in range(int(molecule[k]) - 1):
atoms += molecule[k - 1]
else:
atoms += molecule[k]
if prefactor % 1 == 0:
atoms *= int(prefactor)
elif prefactor % 1 == 0.5:
atoms_sort = sorted(atoms)
N = len(atoms)
atoms = ''
for n in range(N):
for m in range(int(prefactor - 0.5)):
atoms += atoms_sort[n]
if n % 2 == 0:
atoms += atoms_sort[n]
return sign + ''.join(sorted(atoms)) | [
"def",
"extract_atoms",
"(",
"molecule",
")",
":",
"if",
"molecule",
"==",
"''",
":",
"return",
"molecule",
"try",
":",
"return",
"float",
"(",
"molecule",
")",
"except",
"BaseException",
":",
"pass",
"atoms",
"=",
"''",
"if",
"not",
"molecule",
"[",
"0",
"]",
".",
"isalpha",
"(",
")",
":",
"i",
"=",
"0",
"while",
"not",
"molecule",
"[",
"i",
"]",
".",
"isalpha",
"(",
")",
":",
"i",
"+=",
"1",
"prefactor",
"=",
"float",
"(",
"molecule",
"[",
":",
"i",
"]",
")",
"if",
"prefactor",
"<",
"0",
":",
"prefactor",
"=",
"abs",
"(",
"prefactor",
")",
"sign",
"=",
"'-'",
"else",
":",
"sign",
"=",
"''",
"molecule",
"=",
"molecule",
"[",
"i",
":",
"]",
"else",
":",
"prefactor",
"=",
"1",
"sign",
"=",
"''",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"molecule",
")",
")",
":",
"if",
"molecule",
"[",
"k",
"]",
".",
"isdigit",
"(",
")",
":",
"for",
"j",
"in",
"range",
"(",
"int",
"(",
"molecule",
"[",
"k",
"]",
")",
"-",
"1",
")",
":",
"atoms",
"+=",
"molecule",
"[",
"k",
"-",
"1",
"]",
"else",
":",
"atoms",
"+=",
"molecule",
"[",
"k",
"]",
"if",
"prefactor",
"%",
"1",
"==",
"0",
":",
"atoms",
"*=",
"int",
"(",
"prefactor",
")",
"elif",
"prefactor",
"%",
"1",
"==",
"0.5",
":",
"atoms_sort",
"=",
"sorted",
"(",
"atoms",
")",
"N",
"=",
"len",
"(",
"atoms",
")",
"atoms",
"=",
"''",
"for",
"n",
"in",
"range",
"(",
"N",
")",
":",
"for",
"m",
"in",
"range",
"(",
"int",
"(",
"prefactor",
"-",
"0.5",
")",
")",
":",
"atoms",
"+=",
"atoms_sort",
"[",
"n",
"]",
"if",
"n",
"%",
"2",
"==",
"0",
":",
"atoms",
"+=",
"atoms_sort",
"[",
"n",
"]",
"return",
"sign",
"+",
"''",
".",
"join",
"(",
"sorted",
"(",
"atoms",
")",
")"
] | Return a string with all atoms in molecule | [
"Return",
"a",
"string",
"with",
"all",
"atoms",
"in",
"molecule"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/tools.py#L18-L59 | train |
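
A few worked cases for extract_atoms() above, traced directly from the code; the function expands digit multipliers, applies integer prefactors, keeps the sign of a negative prefactor, and returns the atom letters sorted.

from cathub.tools import extract_atoms   # import path per the record

assert extract_atoms('') == ''
assert extract_atoms('1.5') == 1.5          # bare numbers come back as floats
assert extract_atoms('H2O') == 'HHO'
assert extract_atoms('2H2O') == 'HHHHOO'    # prefactor 2 doubles the atoms, then they are sorted
assert extract_atoms('-1CH4') == '-CHHHH'   # negative prefactor keeps a leading '-'
assert extract_atoms('0.5H2') == 'H'        # half-integer prefactor keeps every other atom
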
SUNCAT-Center/CatHub | cathub/tools.py | check_reaction | def check_reaction(reactants, products):
"""Check the stoichiometry and format of chemical reaction used for
folder structure.
list of reactants -> list of products
"""
reactant_list = [reactant.split('@')[0].strip(
'star').strip('gas') for reactant in reactants]
product_list = [product.split('@')[0].strip(
'star').strip('gas') for product in products]
reactant_atoms = [extract_atoms(reactant) for reactant in reactant_list]
product_atoms = [extract_atoms(product) for product in product_list]
reactants = add_atoms(reactant_atoms)
products = add_atoms(product_atoms)
r_stars = 0
p_stars = 0
for i, a in enumerate(reactant_atoms):
if a == '' or 'star' in reactant_list[i]:
r_stars += 1
elif isinstance(a, float):
r_stars += a
for a in product_atoms:
if a == '':
p_stars += 1
elif isinstance(a, float):
p_stars += a
assert ''.join(sorted(reactants)) == ''.join(sorted(products)) | python | def check_reaction(reactants, products):
"""Check the stoichiometry and format of chemical reaction used for
folder structure.
list of reactants -> list of products
"""
reactant_list = [reactant.split('@')[0].strip(
'star').strip('gas') for reactant in reactants]
product_list = [product.split('@')[0].strip(
'star').strip('gas') for product in products]
reactant_atoms = [extract_atoms(reactant) for reactant in reactant_list]
product_atoms = [extract_atoms(product) for product in product_list]
reactants = add_atoms(reactant_atoms)
products = add_atoms(product_atoms)
r_stars = 0
p_stars = 0
for i, a in enumerate(reactant_atoms):
if a == '' or 'star' in reactant_list[i]:
r_stars += 1
elif isinstance(a, float):
r_stars += a
for a in product_atoms:
if a == '':
p_stars += 1
elif isinstance(a, float):
p_stars += a
assert ''.join(sorted(reactants)) == ''.join(sorted(products)) | [
"def",
"check_reaction",
"(",
"reactants",
",",
"products",
")",
":",
"reactant_list",
"=",
"[",
"reactant",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'star'",
")",
".",
"strip",
"(",
"'gas'",
")",
"for",
"reactant",
"in",
"reactants",
"]",
"product_list",
"=",
"[",
"product",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'star'",
")",
".",
"strip",
"(",
"'gas'",
")",
"for",
"product",
"in",
"products",
"]",
"reactant_atoms",
"=",
"[",
"extract_atoms",
"(",
"reactant",
")",
"for",
"reactant",
"in",
"reactant_list",
"]",
"product_atoms",
"=",
"[",
"extract_atoms",
"(",
"product",
")",
"for",
"product",
"in",
"product_list",
"]",
"reactants",
"=",
"add_atoms",
"(",
"reactant_atoms",
")",
"products",
"=",
"add_atoms",
"(",
"product_atoms",
")",
"r_stars",
"=",
"0",
"p_stars",
"=",
"0",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"reactant_atoms",
")",
":",
"if",
"a",
"==",
"''",
"or",
"'star'",
"in",
"reactant_list",
"[",
"i",
"]",
":",
"r_stars",
"+=",
"1",
"elif",
"isinstance",
"(",
"a",
",",
"float",
")",
":",
"r_stars",
"+=",
"a",
"for",
"a",
"in",
"product_atoms",
":",
"if",
"a",
"==",
"''",
":",
"p_stars",
"+=",
"1",
"elif",
"isinstance",
"(",
"a",
",",
"float",
")",
":",
"p_stars",
"+=",
"a",
"assert",
"''",
".",
"join",
"(",
"sorted",
"(",
"reactants",
")",
")",
"==",
"''",
".",
"join",
"(",
"sorted",
"(",
"products",
")",
")"
] | Check the stoichiometry and format of chemical reaction used for
folder structure.
list of reactants -> list of products | [
"Check",
"the",
"stoichiometry",
"and",
"format",
"of",
"chemical",
"reaction",
"used",
"for",
"folder",
"structure",
".",
"list",
"of",
"reactants",
"-",
">",
"list",
"of",
"products"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/tools.py#L75-L104 | train |
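A minimal usage sketch for check_reaction (the species strings below are hypothetical and assume extract_atoms accepts plain formulas; actual CatHub folder-naming conventions may differ):

    from cathub.tools import check_reaction

    # Adsorption of methane on an empty site: CH4(gas) + * -> CH4*
    # The assert inside check_reaction passes when the atom counts balance
    # after the 'gas'/'star' suffixes are stripped; otherwise it raises
    # AssertionError.
    check_reaction(reactants=['CH4gas', 'star'], products=['CH4star'])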
guaix-ucm/numina | numina/core/pipelineload.py | check_section | def check_section(node, section, keys=None):
"""Validate keys in a section"""
if keys:
for key in keys:
if key not in node:
raise ValueError('Missing key %r inside %r node' % (key, section)) | python | def check_section(node, section, keys=None):
"""Validate keys in a section"""
if keys:
for key in keys:
if key not in node:
raise ValueError('Missing key %r inside %r node' % (key, section)) | [
"def",
"check_section",
"(",
"node",
",",
"section",
",",
"keys",
"=",
"None",
")",
":",
"if",
"keys",
":",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"not",
"in",
"node",
":",
"raise",
"ValueError",
"(",
"'Missing key %r inside %r node'",
"%",
"(",
"key",
",",
"section",
")",
")"
] | Validate keys in a section | [
"Validate",
"keys",
"in",
"a",
"section"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L29-L34 | train |
guaix-ucm/numina | numina/core/pipelineload.py | drp_load | def drp_load(package, resource, confclass=None):
"""Load the DRPS from a resource file."""
data = pkgutil.get_data(package, resource)
return drp_load_data(package, data, confclass=confclass) | python | def drp_load(package, resource, confclass=None):
"""Load the DRPS from a resource file."""
data = pkgutil.get_data(package, resource)
return drp_load_data(package, data, confclass=confclass) | [
"def",
"drp_load",
"(",
"package",
",",
"resource",
",",
"confclass",
"=",
"None",
")",
":",
"data",
"=",
"pkgutil",
".",
"get_data",
"(",
"package",
",",
"resource",
")",
"return",
"drp_load_data",
"(",
"package",
",",
"data",
",",
"confclass",
"=",
"confclass",
")"
] | Load the DRPS from a resource file. | [
"Load",
"the",
"DRPS",
"from",
"a",
"resource",
"file",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L37-L40 | train |
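A minimal usage sketch for drp_load (the package and resource names are hypothetical; any numina-based pipeline that ships a DRP YAML resource would work):

    from numina.core.pipelineload import drp_load

    # Load the instrument DRP description bundled inside a pipeline package.
    instrument_drp = drp_load('somedrp', 'drp.yaml')
    print(instrument_drp.version)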
guaix-ucm/numina | numina/core/pipelineload.py | drp_load_data | def drp_load_data(package, data, confclass=None):
"""Load the DRPS from data."""
drpdict = yaml.safe_load(data)
ins = load_instrument(package, drpdict, confclass=confclass)
if ins.version == 'undefined':
pkg = importlib.import_module(package)
ins.version = getattr(pkg, '__version__', 'undefined')
return ins | python | def drp_load_data(package, data, confclass=None):
"""Load the DRPS from data."""
drpdict = yaml.safe_load(data)
ins = load_instrument(package, drpdict, confclass=confclass)
if ins.version == 'undefined':
pkg = importlib.import_module(package)
ins.version = getattr(pkg, '__version__', 'undefined')
return ins | [
"def",
"drp_load_data",
"(",
"package",
",",
"data",
",",
"confclass",
"=",
"None",
")",
":",
"drpdict",
"=",
"yaml",
".",
"safe_load",
"(",
"data",
")",
"ins",
"=",
"load_instrument",
"(",
"package",
",",
"drpdict",
",",
"confclass",
"=",
"confclass",
")",
"if",
"ins",
".",
"version",
"==",
"'undefined'",
":",
"pkg",
"=",
"importlib",
".",
"import_module",
"(",
"package",
")",
"ins",
".",
"version",
"=",
"getattr",
"(",
"pkg",
",",
"'__version__'",
",",
"'undefined'",
")",
"return",
"ins"
] | Load the DRPS from data. | [
"Load",
"the",
"DRPS",
"from",
"data",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L43-L50 | train |
guaix-ucm/numina | numina/core/pipelineload.py | load_modes | def load_modes(node):
"""Load all observing modes"""
if isinstance(node, list):
values = [load_mode(child) for child in node]
keys = [mode.key for mode in values]
return dict(zip(keys,values))
elif isinstance(node, dict):
values = {key: load_mode(child) for key, child in node}
return values
else:
raise NotImplementedError | python | def load_modes(node):
"""Load all observing modes"""
if isinstance(node, list):
values = [load_mode(child) for child in node]
keys = [mode.key for mode in values]
return dict(zip(keys,values))
elif isinstance(node, dict):
values = {key: load_mode(child) for key, child in node}
return values
else:
raise NotImplementedError | [
"def",
"load_modes",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"list",
")",
":",
"values",
"=",
"[",
"load_mode",
"(",
"child",
")",
"for",
"child",
"in",
"node",
"]",
"keys",
"=",
"[",
"mode",
".",
"key",
"for",
"mode",
"in",
"values",
"]",
"return",
"dict",
"(",
"zip",
"(",
"keys",
",",
"values",
")",
")",
"elif",
"isinstance",
"(",
"node",
",",
"dict",
")",
":",
"values",
"=",
"{",
"key",
":",
"load_mode",
"(",
"child",
")",
"for",
"key",
",",
"child",
"in",
"node",
"}",
"return",
"values",
"else",
":",
"raise",
"NotImplementedError"
] | Load all observing modes | [
"Load",
"all",
"observing",
"modes"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L53-L63 | train |
guaix-ucm/numina | numina/core/pipelineload.py | load_mode | def load_mode(node):
"""Load one observing mdode"""
obs_mode = ObservingMode()
obs_mode.__dict__.update(node)
# handle validator
load_mode_validator(obs_mode, node)
# handle builder
load_mode_builder(obs_mode, node)
# handle tagger:
load_mode_tagger(obs_mode, node)
return obs_mode | python | def load_mode(node):
"""Load one observing mdode"""
obs_mode = ObservingMode()
obs_mode.__dict__.update(node)
# handle validator
load_mode_validator(obs_mode, node)
# handle builder
load_mode_builder(obs_mode, node)
# handle tagger:
load_mode_tagger(obs_mode, node)
return obs_mode | [
"def",
"load_mode",
"(",
"node",
")",
":",
"obs_mode",
"=",
"ObservingMode",
"(",
")",
"obs_mode",
".",
"__dict__",
".",
"update",
"(",
"node",
")",
"# handle validator",
"load_mode_validator",
"(",
"obs_mode",
",",
"node",
")",
"# handle builder",
"load_mode_builder",
"(",
"obs_mode",
",",
"node",
")",
"# handle tagger:",
"load_mode_tagger",
"(",
"obs_mode",
",",
"node",
")",
"return",
"obs_mode"
] | Load one observing mode | [
"Load",
"one",
"observing",
"mdode"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L65-L79 | train |
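A minimal usage sketch for load_mode (the node keys are hypothetical and mirror what a DRP YAML 'modes' entry might contain):

    from numina.core.pipelineload import load_mode

    node = {
        'name': 'Bias Image',
        'key': 'bias',
        'tagger': ['insmode'],   # list form -> tags taken from the full OB
    }
    mode = load_mode(node)       # returns an ObservingMode with these attributes
    print(mode.key, mode.name)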
guaix-ucm/numina | numina/core/pipelineload.py | load_mode_tagger | def load_mode_tagger(obs_mode, node):
"""Load observing mode OB tagger"""
# handle tagger:
ntagger = node.get('tagger')
if ntagger is None:
pass
elif isinstance(ntagger, list):
def full_tagger(obsres):
return get_tags_from_full_ob(obsres, reqtags=ntagger)
obs_mode.tagger = full_tagger
elif isinstance(ntagger, six.string_types):
# load function
obs_mode.tagger = import_object(ntagger)
else:
raise TypeError('tagger must be None, a list or a string')
return obs_mode | python | def load_mode_tagger(obs_mode, node):
"""Load observing mode OB tagger"""
# handle tagger:
ntagger = node.get('tagger')
if ntagger is None:
pass
elif isinstance(ntagger, list):
def full_tagger(obsres):
return get_tags_from_full_ob(obsres, reqtags=ntagger)
obs_mode.tagger = full_tagger
elif isinstance(ntagger, six.string_types):
# load function
obs_mode.tagger = import_object(ntagger)
else:
raise TypeError('tagger must be None, a list or a string')
return obs_mode | [
"def",
"load_mode_tagger",
"(",
"obs_mode",
",",
"node",
")",
":",
"# handle tagger:",
"ntagger",
"=",
"node",
".",
"get",
"(",
"'tagger'",
")",
"if",
"ntagger",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"ntagger",
",",
"list",
")",
":",
"def",
"full_tagger",
"(",
"obsres",
")",
":",
"return",
"get_tags_from_full_ob",
"(",
"obsres",
",",
"reqtags",
"=",
"ntagger",
")",
"obs_mode",
".",
"tagger",
"=",
"full_tagger",
"elif",
"isinstance",
"(",
"ntagger",
",",
"six",
".",
"string_types",
")",
":",
"# load function",
"obs_mode",
".",
"tagger",
"=",
"import_object",
"(",
"ntagger",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'tagger must be None, a list or a string'",
")",
"return",
"obs_mode"
] | Load observing mode OB tagger | [
"Load",
"observing",
"mode",
"OB",
"tagger"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L82-L102 | train |
guaix-ucm/numina | numina/core/pipelineload.py | load_mode_builder | def load_mode_builder(obs_mode, node):
"""Load observing mode OB builder"""
# Check 'builder' and 'builder_options'
nval1 = node.get('builder')
if nval1 is not None:
if isinstance(nval1, str):
# override method
newmethod = import_object(nval1)
obs_mode.build_ob = newmethod.__get__(obs_mode)
else:
raise TypeError('builder must be None or a string')
else:
nval2 = node.get('builder_options')
if nval2 is not None:
if isinstance(nval2, list):
for opt_dict in nval2:
if 'result_of' in opt_dict:
fields = opt_dict['result_of']
obs_mode.build_ob_options = ResultOf(**fields)
break
else:
raise TypeError('builder_options must be None or a list')
return obs_mode | python | def load_mode_builder(obs_mode, node):
"""Load observing mode OB builder"""
# Check 'builder' and 'builder_options'
nval1 = node.get('builder')
if nval1 is not None:
if isinstance(nval1, str):
# override method
newmethod = import_object(nval1)
obs_mode.build_ob = newmethod.__get__(obs_mode)
else:
raise TypeError('builder must be None or a string')
else:
nval2 = node.get('builder_options')
if nval2 is not None:
if isinstance(nval2, list):
for opt_dict in nval2:
if 'result_of' in opt_dict:
fields = opt_dict['result_of']
obs_mode.build_ob_options = ResultOf(**fields)
break
else:
raise TypeError('builder_options must be None or a list')
return obs_mode | [
"def",
"load_mode_builder",
"(",
"obs_mode",
",",
"node",
")",
":",
"# Check 'builder' and 'builder_options'",
"nval1",
"=",
"node",
".",
"get",
"(",
"'builder'",
")",
"if",
"nval1",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"nval1",
",",
"str",
")",
":",
"# override method",
"newmethod",
"=",
"import_object",
"(",
"nval1",
")",
"obs_mode",
".",
"build_ob",
"=",
"newmethod",
".",
"__get__",
"(",
"obs_mode",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'builder must be None or a string'",
")",
"else",
":",
"nval2",
"=",
"node",
".",
"get",
"(",
"'builder_options'",
")",
"if",
"nval2",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"nval2",
",",
"list",
")",
":",
"for",
"opt_dict",
"in",
"nval2",
":",
"if",
"'result_of'",
"in",
"opt_dict",
":",
"fields",
"=",
"opt_dict",
"[",
"'result_of'",
"]",
"obs_mode",
".",
"build_ob_options",
"=",
"ResultOf",
"(",
"*",
"*",
"fields",
")",
"break",
"else",
":",
"raise",
"TypeError",
"(",
"'builder_options must be None or a list'",
")",
"return",
"obs_mode"
] | Load observing mode OB builder | [
"Load",
"observing",
"mode",
"OB",
"builder"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L105-L132 | train |
guaix-ucm/numina | numina/core/pipelineload.py | load_mode_validator | def load_mode_validator(obs_mode, node):
"""Load observing mode validator"""
nval = node.get('validator')
if nval is None:
pass
elif isinstance(nval, str):
# load function
obs_mode.validator = import_object(nval)
else:
raise TypeError('validator must be None or a string')
return obs_mode | python | def load_mode_validator(obs_mode, node):
"""Load observing mode validator"""
nval = node.get('validator')
if nval is None:
pass
elif isinstance(nval, str):
# load function
obs_mode.validator = import_object(nval)
else:
raise TypeError('validator must be None or a string')
return obs_mode | [
"def",
"load_mode_validator",
"(",
"obs_mode",
",",
"node",
")",
":",
"nval",
"=",
"node",
".",
"get",
"(",
"'validator'",
")",
"if",
"nval",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"nval",
",",
"str",
")",
":",
"# load function",
"obs_mode",
".",
"validator",
"=",
"import_object",
"(",
"nval",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'validator must be None or a string'",
")",
"return",
"obs_mode"
] | Load observing mode validator | [
"Load",
"observing",
"mode",
"validator"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipelineload.py#L135-L148 | train |
xflr6/bitsets | bitsets/series.py | Series.frommembers | def frommembers(cls, members):
"""Series from iterable of member iterables."""
return cls.frombitsets(map(cls.BitSet.frommembers, members)) | python | def frommembers(cls, members):
"""Series from iterable of member iterables."""
return cls.frombitsets(map(cls.BitSet.frommembers, members)) | [
"def",
"frommembers",
"(",
"cls",
",",
"members",
")",
":",
"return",
"cls",
".",
"frombitsets",
"(",
"map",
"(",
"cls",
".",
"BitSet",
".",
"frommembers",
",",
"members",
")",
")"
] | Series from iterable of member iterables. | [
"Series",
"from",
"iterable",
"of",
"member",
"iterables",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/series.py#L18-L20 | train |
xflr6/bitsets | bitsets/series.py | Series.frombools | def frombools(cls, bools):
"""Series from iterable of boolean evaluable iterables."""
return cls.frombitsets(map(cls.BitSet.frombools, bools)) | python | def frombools(cls, bools):
"""Series from iterable of boolean evaluable iterables."""
return cls.frombitsets(map(cls.BitSet.frombools, bools)) | [
"def",
"frombools",
"(",
"cls",
",",
"bools",
")",
":",
"return",
"cls",
".",
"frombitsets",
"(",
"map",
"(",
"cls",
".",
"BitSet",
".",
"frombools",
",",
"bools",
")",
")"
] | Series from iterable of boolean evaluable iterables. | [
"Series",
"from",
"iterable",
"of",
"boolean",
"evaluable",
"iterables",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/series.py#L23-L25 | train |
xflr6/bitsets | bitsets/series.py | Series.frombits | def frombits(cls, bits):
"""Series from binary string arguments."""
return cls.frombitsets(map(cls.BitSet.frombits, bits)) | python | def frombits(cls, bits):
"""Series from binary string arguments."""
return cls.frombitsets(map(cls.BitSet.frombits, bits)) | [
"def",
"frombits",
"(",
"cls",
",",
"bits",
")",
":",
"return",
"cls",
".",
"frombitsets",
"(",
"map",
"(",
"cls",
".",
"BitSet",
".",
"frombits",
",",
"bits",
")",
")"
] | Series from binary string arguments. | [
"Series",
"from",
"binary",
"string",
"arguments",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/series.py#L28-L30 | train |
xflr6/bitsets | bitsets/series.py | Series.fromints | def fromints(cls, ints):
"""Series from integer rank arguments."""
return cls.frombitsets(map(cls.BitSet.fromint, ints)) | python | def fromints(cls, ints):
"""Series from integer rank arguments."""
return cls.frombitsets(map(cls.BitSet.fromint, ints)) | [
"def",
"fromints",
"(",
"cls",
",",
"ints",
")",
":",
"return",
"cls",
".",
"frombitsets",
"(",
"map",
"(",
"cls",
".",
"BitSet",
".",
"fromint",
",",
"ints",
")",
")"
] | Series from integer rank arguments. | [
"Series",
"from",
"integer",
"rank",
"arguments",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/series.py#L33-L35 | train |
xflr6/bitsets | bitsets/series.py | Series.index_sets | def index_sets(self, as_set=False):
"""Return the series as list of index set tuples."""
indexes = frozenset if as_set else tuple
return [indexes(b.iter_set()) for b in self] | python | def index_sets(self, as_set=False):
"""Return the series as list of index set tuples."""
indexes = frozenset if as_set else tuple
return [indexes(b.iter_set()) for b in self] | [
"def",
"index_sets",
"(",
"self",
",",
"as_set",
"=",
"False",
")",
":",
"indexes",
"=",
"frozenset",
"if",
"as_set",
"else",
"tuple",
"return",
"[",
"indexes",
"(",
"b",
".",
"iter_set",
"(",
")",
")",
"for",
"b",
"in",
"self",
"]"
] | Return the series as list of index set tuples. | [
"Return",
"the",
"series",
"as",
"list",
"of",
"index",
"set",
"tuples",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/series.py#L57-L60 | train |
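A minimal usage sketch for the Series constructors and index_sets (it assumes a series class created with bitsets.bitset(..., list=True), as described in the bitsets documentation; check the library docs for the exact constructor signature):

    from bitsets import bitset

    Letters = bitset('Letters', 'abc', list=True)
    series = Letters.List.frommembers(['ab', 'c'])
    # Each member set is reported as a tuple of bit indices.
    print(series.index_sets())   # expected: [(0, 1), (2,)]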
arkottke/pysra | pysra/output.py | append_arrays | def append_arrays(many, single):
"""Append an array to another padding with NaNs for constant length.
Parameters
----------
many : array_like of rank (j, k)
values appended to a copy of this array. This may be a 1-D or 2-D
array.
single : array_like of rank l
values to append. This should be a 1-D array.
Returns
-------
append : :class:`numpy.ndarray`
2-D array with rank (j + 1, max(k, l)) with missing values padded
with :class:`numpy.nan`
"""
assert np.ndim(single) == 1
# Check if the values need to be padded to for equal length
diff = single.shape[0] - many.shape[0]
if diff < 0:
single = np.pad(single, (0, -diff), 'constant', constant_values=np.nan)
elif diff > 0:
many = np.pad(many, ((0, diff), ), 'constant', constant_values=np.nan)
else:
# No padding needed
pass
return np.c_[many, single] | python | def append_arrays(many, single):
"""Append an array to another padding with NaNs for constant length.
Parameters
----------
many : array_like of rank (j, k)
values appended to a copy of this array. This may be a 1-D or 2-D
array.
single : array_like of rank l
values to append. This should be a 1-D array.
Returns
-------
append : :class:`numpy.ndarray`
2-D array with rank (j + 1, max(k, l)) with missing values padded
with :class:`numpy.nan`
"""
assert np.ndim(single) == 1
# Check if the values need to be padded to for equal length
diff = single.shape[0] - many.shape[0]
if diff < 0:
single = np.pad(single, (0, -diff), 'constant', constant_values=np.nan)
elif diff > 0:
many = np.pad(many, ((0, diff), ), 'constant', constant_values=np.nan)
else:
# No padding needed
pass
return np.c_[many, single] | [
"def",
"append_arrays",
"(",
"many",
",",
"single",
")",
":",
"assert",
"np",
".",
"ndim",
"(",
"single",
")",
"==",
"1",
"# Check if the values need to be padded to for equal length",
"diff",
"=",
"single",
".",
"shape",
"[",
"0",
"]",
"-",
"many",
".",
"shape",
"[",
"0",
"]",
"if",
"diff",
"<",
"0",
":",
"single",
"=",
"np",
".",
"pad",
"(",
"single",
",",
"(",
"0",
",",
"-",
"diff",
")",
",",
"'constant'",
",",
"constant_values",
"=",
"np",
".",
"nan",
")",
"elif",
"diff",
">",
"0",
":",
"many",
"=",
"np",
".",
"pad",
"(",
"many",
",",
"(",
"(",
"0",
",",
"diff",
")",
",",
")",
",",
"'constant'",
",",
"constant_values",
"=",
"np",
".",
"nan",
")",
"else",
":",
"# No padding needed",
"pass",
"return",
"np",
".",
"c_",
"[",
"many",
",",
"single",
"]"
] | Append an array to another padding with NaNs for constant length.
Parameters
----------
many : array_like of rank (j, k)
values appended to a copy of this array. This may be a 1-D or 2-D
array.
single : array_like of rank l
values to append. This should be a 1-D array.
Returns
-------
append : :class:`numpy.ndarray`
2-D array with rank (j + 1, max(k, l)) with missing values padded
with :class:`numpy.nan` | [
"Append",
"an",
"array",
"to",
"another",
"padding",
"with",
"NaNs",
"for",
"constant",
"length",
"."
] | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/output.py#L46-L74 | train |
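A minimal usage sketch for append_arrays (values are arbitrary; the shorter input is padded with NaN before column-stacking):

    import numpy as np
    from pysra.output import append_arrays

    many = np.array([1.0, 2.0, 3.0])
    single = np.array([4.0, 5.0])
    # Result is array([[1., 4.], [2., 5.], [3., nan]]).
    combined = append_arrays(many, single)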
arkottke/pysra | pysra/output.py | RatioBasedOutput._get_locations | def _get_locations(self, calc):
"""Locate locations within the profile."""
return (self._location_in(calc.profile),
self._location_out(calc.profile)) | python | def _get_locations(self, calc):
"""Locate locations within the profile."""
return (self._location_in(calc.profile),
self._location_out(calc.profile)) | [
"def",
"_get_locations",
"(",
"self",
",",
"calc",
")",
":",
"return",
"(",
"self",
".",
"_location_in",
"(",
"calc",
".",
"profile",
")",
",",
"self",
".",
"_location_out",
"(",
"calc",
".",
"profile",
")",
")"
] | Locate locations within the profile. | [
"Locate",
"locations",
"within",
"the",
"profile",
"."
] | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/output.py#L363-L366 | train |
guaix-ucm/numina | numina/array/wavecalib/fix_pix_borders.py | find_pix_borders | def find_pix_borders(sp, sought_value):
"""Find useful region of a given spectrum
Determine the useful region of a given spectrum by skipping
the initial (final) pixels with values equal to 'sought_value'.
Parameters
----------
sp : 1D numpy array
Input spectrum.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
Returns
-------
jmin, jmax : tuple (integers)
Valid spectrum region (in array coordinates, from 0 to
NAXIS1 - 1). If the values of all the pixels in the spectrum
are equal to 'sought_value', the returned values are jmin=-1
and jmax=naxis1.
"""
if sp.ndim != 1:
raise ValueError('Unexpected number of dimensions:', sp.ndim)
naxis1 = len(sp)
jborder_min = -1
jborder_max = naxis1
# only spectra with values different from 'sought_value'
if not np.alltrue(sp == sought_value):
# left border
while True:
jborder_min += 1
if sp[jborder_min] != sought_value:
break
# right border
while True:
jborder_max -= 1
if sp[jborder_max] != sought_value:
break
return jborder_min, jborder_max | python | def find_pix_borders(sp, sought_value):
"""Find useful region of a given spectrum
Determine the useful region of a given spectrum by skipping
the initial (final) pixels with values equal to 'sought_value'.
Parameters
----------
sp : 1D numpy array
Input spectrum.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
Returns
-------
jmin, jmax : tuple (integers)
Valid spectrum region (in array coordinates, from 0 to
NAXIS1 - 1). If the values of all the pixels in the spectrum
are equal to 'sought_value', the returned values are jmin=-1
and jmax=naxis1.
"""
if sp.ndim != 1:
raise ValueError('Unexpected number of dimensions:', sp.ndim)
naxis1 = len(sp)
jborder_min = -1
jborder_max = naxis1
# only spectra with values different from 'sought_value'
if not np.alltrue(sp == sought_value):
# left border
while True:
jborder_min += 1
if sp[jborder_min] != sought_value:
break
# right border
while True:
jborder_max -= 1
if sp[jborder_max] != sought_value:
break
return jborder_min, jborder_max | [
"def",
"find_pix_borders",
"(",
"sp",
",",
"sought_value",
")",
":",
"if",
"sp",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Unexpected number of dimensions:'",
",",
"sp",
".",
"ndim",
")",
"naxis1",
"=",
"len",
"(",
"sp",
")",
"jborder_min",
"=",
"-",
"1",
"jborder_max",
"=",
"naxis1",
"# only spectra with values different from 'sought_value'",
"if",
"not",
"np",
".",
"alltrue",
"(",
"sp",
"==",
"sought_value",
")",
":",
"# left border",
"while",
"True",
":",
"jborder_min",
"+=",
"1",
"if",
"sp",
"[",
"jborder_min",
"]",
"!=",
"sought_value",
":",
"break",
"# right border",
"while",
"True",
":",
"jborder_max",
"-=",
"1",
"if",
"sp",
"[",
"jborder_max",
"]",
"!=",
"sought_value",
":",
"break",
"return",
"jborder_min",
",",
"jborder_max"
] | Find useful region of a given spectrum
Determine the useful region of a given spectrum by skipping
the initial (final) pixels with values equal to 'sought_value'.
Parameters
----------
sp : 1D numpy array
Input spectrum.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
Returns
-------
jmin, jmax : tuple (integers)
Valid spectrum region (in array coordinates, from 0 to
NAXIS1 - 1). If the values of all the pixels in the spectrum
are equal to 'sought_value', the returned values are jmin=-1
and jmax=naxis1. | [
"Find",
"useful",
"region",
"of",
"a",
"given",
"spectrum"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/fix_pix_borders.py#L16-L59 | train |
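A minimal usage sketch for find_pix_borders (values are arbitrary):

    import numpy as np
    from numina.array.wavecalib.fix_pix_borders import find_pix_borders

    sp = np.array([0, 0, 7, 8, 9, 0])
    # Pixels equal to 0 at both ends are skipped; the useful region is [2, 4].
    jmin, jmax = find_pix_borders(sp, sought_value=0)   # -> (2, 4)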
guaix-ucm/numina | numina/array/wavecalib/fix_pix_borders.py | fix_pix_borders | def fix_pix_borders(image2d, nreplace, sought_value, replacement_value):
"""Replace a few pixels at the borders of each spectrum.
Set to 'replacement_value' 'nreplace' pixels at the beginning (at
the end) of each spectrum just after (before) the spectrum value
changes from (to) 'sought_value', as seen from the image borders.
Parameters
----------
image2d : numpy array
Initial 2D image.
nreplace : int
Number of pixels to be replaced in each border.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
replacement_value : int, float, bool
Pixel value to be employed in the 'nreplace' pixels.
Returns
-------
image2d : numpy array
Final 2D image.
"""
# input image size
naxis2, naxis1 = image2d.shape
for i in range(naxis2):
# only spectra with values different from 'sought_value'
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
# left border
if jborder_min != -1:
j1 = jborder_min
j2 = min(j1 + nreplace, naxis1)
image2d[i, j1:j2] = replacement_value
# right border
if jborder_max != naxis1:
j2 = jborder_max + 1
j1 = max(j2 - nreplace, 0)
image2d[i, j1:j2] = replacement_value
return image2d | python | def fix_pix_borders(image2d, nreplace, sought_value, replacement_value):
"""Replace a few pixels at the borders of each spectrum.
Set to 'replacement_value' 'nreplace' pixels at the beginning (at
the end) of each spectrum just after (before) the spectrum value
changes from (to) 'sought_value', as seen from the image borders.
Parameters
----------
image2d : numpy array
Initial 2D image.
nreplace : int
Number of pixels to be replaced in each border.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
replacement_value : int, float, bool
Pixel value to be employed in the 'nreplace' pixels.
Returns
-------
image2d : numpy array
Final 2D image.
"""
# input image size
naxis2, naxis1 = image2d.shape
for i in range(naxis2):
# only spectra with values different from 'sought_value'
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
# left border
if jborder_min != -1:
j1 = jborder_min
j2 = min(j1 + nreplace, naxis1)
image2d[i, j1:j2] = replacement_value
# right border
if jborder_max != naxis1:
j2 = jborder_max + 1
j1 = max(j2 - nreplace, 0)
image2d[i, j1:j2] = replacement_value
return image2d | [
"def",
"fix_pix_borders",
"(",
"image2d",
",",
"nreplace",
",",
"sought_value",
",",
"replacement_value",
")",
":",
"# input image size",
"naxis2",
",",
"naxis1",
"=",
"image2d",
".",
"shape",
"for",
"i",
"in",
"range",
"(",
"naxis2",
")",
":",
"# only spectra with values different from 'sought_value'",
"jborder_min",
",",
"jborder_max",
"=",
"find_pix_borders",
"(",
"image2d",
"[",
"i",
",",
":",
"]",
",",
"sought_value",
"=",
"sought_value",
")",
"# left border",
"if",
"jborder_min",
"!=",
"-",
"1",
":",
"j1",
"=",
"jborder_min",
"j2",
"=",
"min",
"(",
"j1",
"+",
"nreplace",
",",
"naxis1",
")",
"image2d",
"[",
"i",
",",
"j1",
":",
"j2",
"]",
"=",
"replacement_value",
"# right border",
"if",
"jborder_max",
"!=",
"naxis1",
":",
"j2",
"=",
"jborder_max",
"+",
"1",
"j1",
"=",
"max",
"(",
"j2",
"-",
"nreplace",
",",
"0",
")",
"image2d",
"[",
"i",
",",
"j1",
":",
"j2",
"]",
"=",
"replacement_value",
"return",
"image2d"
] | Replace a few pixels at the borders of each spectrum.
Set to 'replacement_value' 'nreplace' pixels at the beginning (at
the end) of each spectrum just after (before) the spectrum value
changes from (to) 'sought_value', as seen from the image borders.
Parameters
----------
image2d : numpy array
Initial 2D image.
nreplace : int
Number of pixels to be replaced in each border.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
replacement_value : int, float, bool
Pixel value to be employed in the 'nreplace' pixels.
Returns
-------
image2d : numpy array
Final 2D image. | [
"Replace",
"a",
"few",
"pixels",
"at",
"the",
"borders",
"of",
"each",
"spectrum",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/fix_pix_borders.py#L62-L108 | train |
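A minimal usage sketch for fix_pix_borders (values are arbitrary; the array is modified in place and also returned):

    import numpy as np
    from numina.array.wavecalib.fix_pix_borders import fix_pix_borders

    image2d = np.array([[0, 0, 5, 6, 7, 0]], dtype=float)
    # Replace one pixel at each end of the useful region with -1:
    # the row becomes [0, 0, -1, 6, -1, 0].
    fixed = fix_pix_borders(image2d, nreplace=1, sought_value=0,
                            replacement_value=-1)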
guaix-ucm/numina | numina/array/wavecalib/fix_pix_borders.py | define_mask_borders | def define_mask_borders(image2d, sought_value, nadditional=0):
"""Generate mask avoiding undesired values at the borders.
Set to True image borders with values equal to 'sought_value'
Parameters
----------
image2d : numpy array
Initial 2D image.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
nadditional : int
Number of additional pixels to be masked at each border.
Returns
-------
mask2d : numpy array
2D mask.
borders : list of tuples
List of tuples (jmin, jmax) with the border limits (in array
coordinates) found by find_pix_borders.
"""
# input image size
naxis2, naxis1 = image2d.shape
# initialize mask
mask2d = np.zeros((naxis2, naxis1), dtype=bool)
# initialize list to store borders
borders = []
for i in range(naxis2):
# only spectra with values different from 'sought_value'
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
borders.append((jborder_min, jborder_max))
if (jborder_min, jborder_max) != (-1, naxis1):
if jborder_min != -1:
j1 = 0
j2 = jborder_min + nadditional + 1
mask2d[i, j1:j2] = True
if jborder_max != naxis1:
j1 = jborder_max - nadditional
j2 = naxis1
mask2d[i, j1:j2] = True
return mask2d, borders | python | def define_mask_borders(image2d, sought_value, nadditional=0):
"""Generate mask avoiding undesired values at the borders.
Set to True image borders with values equal to 'sought_value'
Parameters
----------
image2d : numpy array
Initial 2D image.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
nadditional : int
Number of additional pixels to be masked at each border.
Returns
-------
mask2d : numpy array
2D mask.
borders : list of tuples
List of tuples (jmin, jmax) with the border limits (in array
coordinates) found by find_pix_borders.
"""
# input image size
naxis2, naxis1 = image2d.shape
# initialize mask
mask2d = np.zeros((naxis2, naxis1), dtype=bool)
# initialize list to store borders
borders = []
for i in range(naxis2):
# only spectra with values different from 'sought_value'
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
borders.append((jborder_min, jborder_max))
if (jborder_min, jborder_max) != (-1, naxis1):
if jborder_min != -1:
j1 = 0
j2 = jborder_min + nadditional + 1
mask2d[i, j1:j2] = True
if jborder_max != naxis1:
j1 = jborder_max - nadditional
j2 = naxis1
mask2d[i, j1:j2] = True
return mask2d, borders | [
"def",
"define_mask_borders",
"(",
"image2d",
",",
"sought_value",
",",
"nadditional",
"=",
"0",
")",
":",
"# input image size",
"naxis2",
",",
"naxis1",
"=",
"image2d",
".",
"shape",
"# initialize mask",
"mask2d",
"=",
"np",
".",
"zeros",
"(",
"(",
"naxis2",
",",
"naxis1",
")",
",",
"dtype",
"=",
"bool",
")",
"# initialize list to store borders",
"borders",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"naxis2",
")",
":",
"# only spectra with values different from 'sought_value'",
"jborder_min",
",",
"jborder_max",
"=",
"find_pix_borders",
"(",
"image2d",
"[",
"i",
",",
":",
"]",
",",
"sought_value",
"=",
"sought_value",
")",
"borders",
".",
"append",
"(",
"(",
"jborder_min",
",",
"jborder_max",
")",
")",
"if",
"(",
"jborder_min",
",",
"jborder_max",
")",
"!=",
"(",
"-",
"1",
",",
"naxis1",
")",
":",
"if",
"jborder_min",
"!=",
"-",
"1",
":",
"j1",
"=",
"0",
"j2",
"=",
"jborder_min",
"+",
"nadditional",
"+",
"1",
"mask2d",
"[",
"i",
",",
"j1",
":",
"j2",
"]",
"=",
"True",
"if",
"jborder_max",
"!=",
"naxis1",
":",
"j1",
"=",
"jborder_max",
"-",
"nadditional",
"j2",
"=",
"naxis1",
"mask2d",
"[",
"i",
",",
"j1",
":",
"j2",
"]",
"=",
"True",
"return",
"mask2d",
",",
"borders"
] | Generate mask avoiding undesired values at the borders.
Set to True image borders with values equal to 'sought_value'
Parameters
----------
image2d : numpy array
Initial 2D image.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
nadditional : int
Number of additional pixels to be masked at each border.
Returns
-------
mask2d : numpy array
2D mask.
borders : list of tuples
List of tuples (jmin, jmax) with the border limits (in array
coordinates) found by find_pix_borders. | [
"Generate",
"mask",
"avoiding",
"undesired",
"values",
"at",
"the",
"borders",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/fix_pix_borders.py#L111-L161 | train |
guaix-ucm/numina | numina/array/wavecalib/solutionarc.py | SolutionArcCalibration.update_features | def update_features(self, poly):
"""Evaluate wavelength at xpos using the provided polynomial."""
for feature in self.features:
feature.wavelength = poly(feature.xpos) | python | def update_features(self, poly):
"""Evaluate wavelength at xpos using the provided polynomial."""
for feature in self.features:
feature.wavelength = poly(feature.xpos) | [
"def",
"update_features",
"(",
"self",
",",
"poly",
")",
":",
"for",
"feature",
"in",
"self",
".",
"features",
":",
"feature",
".",
"wavelength",
"=",
"poly",
"(",
"feature",
".",
"xpos",
")"
] | Evaluate wavelength at xpos using the provided polynomial. | [
"Evaluate",
"wavelength",
"at",
"xpos",
"using",
"the",
"provided",
"polynomial",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/solutionarc.py#L203-L207 | train |
guaix-ucm/numina | numina/core/oresult.py | dataframe_from_list | def dataframe_from_list(values):
"""Build a DataFrame object from a list."""
if(isinstance(values, six.string_types)):
return DataFrame(filename=values)
elif(isinstance(values, fits.HDUList)):
return DataFrame(frame=values)
else:
return None | python | def dataframe_from_list(values):
"""Build a DataFrame object from a list."""
if(isinstance(values, six.string_types)):
return DataFrame(filename=values)
elif(isinstance(values, fits.HDUList)):
return DataFrame(frame=values)
else:
return None | [
"def",
"dataframe_from_list",
"(",
"values",
")",
":",
"if",
"(",
"isinstance",
"(",
"values",
",",
"six",
".",
"string_types",
")",
")",
":",
"return",
"DataFrame",
"(",
"filename",
"=",
"values",
")",
"elif",
"(",
"isinstance",
"(",
"values",
",",
"fits",
".",
"HDUList",
")",
")",
":",
"return",
"DataFrame",
"(",
"frame",
"=",
"values",
")",
"else",
":",
"return",
"None"
] | Build a DataFrame object from a list. | [
"Build",
"a",
"DataFrame",
"object",
"from",
"a",
"list",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/oresult.py#L86-L93 | train |
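A minimal usage sketch for dataframe_from_list (the FITS filename is hypothetical):

    from numina.core.oresult import dataframe_from_list

    frame = dataframe_from_list('r0001.fits')   # -> DataFrame(filename='r0001.fits')
    nothing = dataframe_from_list(42)           # unsupported type -> None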
guaix-ucm/numina | numina/core/oresult.py | obsres_from_dict | def obsres_from_dict(values):
"""Build a ObservationResult object from a dictionary."""
obsres = ObservationResult()
ikey = 'frames'
# Workaround
if 'images' in values:
ikey = 'images'
obsres.id = values.get('id', 1)
obsres.mode = values['mode']
obsres.instrument = values['instrument']
obsres.configuration = values.get('configuration', 'default')
obsres.pipeline = values.get('pipeline', 'default')
obsres.children = values.get('children', [])
obsres.parent = values.get('parent', None)
obsres.results = values.get('results', {})
obsres.requirements = values.get('requirements', {})
try:
obsres.frames = [dataframe_from_list(val) for val in values[ikey]]
except Exception:
obsres.frames = []
return obsres | python | def obsres_from_dict(values):
"""Build a ObservationResult object from a dictionary."""
obsres = ObservationResult()
ikey = 'frames'
# Workaround
if 'images' in values:
ikey = 'images'
obsres.id = values.get('id', 1)
obsres.mode = values['mode']
obsres.instrument = values['instrument']
obsres.configuration = values.get('configuration', 'default')
obsres.pipeline = values.get('pipeline', 'default')
obsres.children = values.get('children', [])
obsres.parent = values.get('parent', None)
obsres.results = values.get('results', {})
obsres.requirements = values.get('requirements', {})
try:
obsres.frames = [dataframe_from_list(val) for val in values[ikey]]
except Exception:
obsres.frames = []
return obsres | [
"def",
"obsres_from_dict",
"(",
"values",
")",
":",
"obsres",
"=",
"ObservationResult",
"(",
")",
"ikey",
"=",
"'frames'",
"# Workaround",
"if",
"'images'",
"in",
"values",
":",
"ikey",
"=",
"'images'",
"obsres",
".",
"id",
"=",
"values",
".",
"get",
"(",
"'id'",
",",
"1",
")",
"obsres",
".",
"mode",
"=",
"values",
"[",
"'mode'",
"]",
"obsres",
".",
"instrument",
"=",
"values",
"[",
"'instrument'",
"]",
"obsres",
".",
"configuration",
"=",
"values",
".",
"get",
"(",
"'configuration'",
",",
"'default'",
")",
"obsres",
".",
"pipeline",
"=",
"values",
".",
"get",
"(",
"'pipeline'",
",",
"'default'",
")",
"obsres",
".",
"children",
"=",
"values",
".",
"get",
"(",
"'children'",
",",
"[",
"]",
")",
"obsres",
".",
"parent",
"=",
"values",
".",
"get",
"(",
"'parent'",
",",
"None",
")",
"obsres",
".",
"results",
"=",
"values",
".",
"get",
"(",
"'results'",
",",
"{",
"}",
")",
"obsres",
".",
"requirements",
"=",
"values",
".",
"get",
"(",
"'requirements'",
",",
"{",
"}",
")",
"try",
":",
"obsres",
".",
"frames",
"=",
"[",
"dataframe_from_list",
"(",
"val",
")",
"for",
"val",
"in",
"values",
"[",
"ikey",
"]",
"]",
"except",
"Exception",
":",
"obsres",
".",
"frames",
"=",
"[",
"]",
"return",
"obsres"
] | Build a ObservationResult object from a dictionary. | [
"Build",
"a",
"ObservationResult",
"object",
"from",
"a",
"dictionary",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/oresult.py#L96-L120 | train |
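A minimal usage sketch for obsres_from_dict (instrument, mode and file names are hypothetical):

    from numina.core.oresult import obsres_from_dict

    ob = obsres_from_dict({
        'id': 5,
        'instrument': 'MYINSTRUMENT',
        'mode': 'bias',
        'frames': ['r0001.fits', 'r0002.fits'],
    })
    print(ob.id, ob.mode, len(ob.frames))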
guaix-ucm/numina | numina/core/oresult.py | ObservationResult.get_sample_frame | def get_sample_frame(self):
"""Return first available image in observation result"""
for frame in self.frames:
return frame.open()
for res in self.results.values():
return res.open()
return None | python | def get_sample_frame(self):
"""Return first available image in observation result"""
for frame in self.frames:
return frame.open()
for res in self.results.values():
return res.open()
return None | [
"def",
"get_sample_frame",
"(",
"self",
")",
":",
"for",
"frame",
"in",
"self",
".",
"frames",
":",
"return",
"frame",
".",
"open",
"(",
")",
"for",
"res",
"in",
"self",
".",
"results",
".",
"values",
"(",
")",
":",
"return",
"res",
".",
"open",
"(",
")",
"return",
"None"
] | Return first available image in observation result | [
"Return",
"first",
"available",
"image",
"in",
"observation",
"result"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/oresult.py#L75-L83 | train |
guaix-ucm/numina | numina/array/nirproc.py | fowler_array | def fowler_array(fowlerdata, ti=0.0, ts=0.0, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, normalize=False):
"""Loop over the first axis applying Fowler processing.
*fowlerdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in Fowler mode (Fowler and Gatley 1991).
The shape of the array must be of the form 2N_p x M x N, with N_p being
the number of pairs in Fowler mode.
The output signal is just the mean value of the differences between the
last N_p values (S_i) and the first N_p values (R_i).
.. math::
S_F = \\frac{1}{N_p}\\sum\\limits_{i=0}^{N_p-1} S_i - R_i
If the source has a radiance F, then the measured signal is equivalent
to:
.. math::
S_F = F T_I - F T_S (N_p -1) = F T_E
being T_I the integration time (*ti*), the time since the first
productive read to the last productive read for a given pixel and T_S the
time between samples (*ts*). T_E is the time between correlated reads
:math:`T_E = T_I - T_S (N_p - 1)`.
The variance of the signal is the sum of two terms, one for the readout
noise:
.. math::
\\mathrm{var}(S_{F1}) =\\frac{2\sigma_R^2}{N_p}
and other for the photon noise:
.. math::
\\mathrm{var}(S_{F2}) = F T_E - F T_S \\frac{1}{3}(N_p-\\frac{1}{N_p})
= F T_I - F T_S (\\frac{4}{3} N_p -1 - \\frac{1}{3N_p})
:param fowlerdata: Convertible to a 3D numpy.ndarray with first axis even
:param ti: Integration time.
:param ts: Time between samples.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of (signal, variance of the signal, number of pixels used
and badpixel mask).
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if ti < 0:
raise ValueError("invalid parameter, ti < 0.0")
if ts < 0:
raise ValueError("invalid parameter, ts < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
fowlerdata = numpy.asarray(fowlerdata)
if fowlerdata.ndim != 3:
raise ValueError('fowlerdata must be 3D')
npairs = fowlerdata.shape[0] // 2
if 2 * npairs != fowlerdata.shape[0]:
raise ValueError('axis-0 in fowlerdata must be even')
# change byteorder
ndtype = fowlerdata.dtype.newbyteorder('=')
fowlerdata = numpy.asarray(fowlerdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(fowlerdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (fowlerdata.shape[1], fowlerdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
raise ValueError('shape of badpixels is not '
'compatible with shape of fowlerdata')
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_fowler_intl(
fowlerdata, ti, ts, gain, ron,
badpixels, saturation, blank,
result, var, npix, mask
)
return result, var, npix, mask | python | def fowler_array(fowlerdata, ti=0.0, ts=0.0, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, normalize=False):
"""Loop over the first axis applying Fowler processing.
*fowlerdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in Fowler mode (Fowler and Gatley 1991).
The shape of the array must be of the form 2N_p x M x N, with N_p being
the number of pairs in Fowler mode.
The output signal is just the mean value of the differences between the
last N_p values (S_i) and the first N_p values (R_i).
.. math::
S_F = \\frac{1}{N_p}\\sum\\limits_{i=0}^{N_p-1} S_i - R_i
If the source has a radiance F, then the measured signal is equivalent
to:
.. math::
S_F = F T_I - F T_S (N_p -1) = F T_E
being T_I the integration time (*ti*), the time since the first
productive read to the last productive read for a given pixel and T_S the
time between samples (*ts*). T_E is the time between correlated reads
:math:`T_E = T_I - T_S (N_p - 1)`.
The variance of the signal is the sum of two terms, one for the readout
noise:
.. math::
\\mathrm{var}(S_{F1}) =\\frac{2\sigma_R^2}{N_p}
and other for the photon noise:
.. math::
\\mathrm{var}(S_{F2}) = F T_E - F T_S \\frac{1}{3}(N_p-\\frac{1}{N_p})
= F T_I - F T_S (\\frac{4}{3} N_p -1 - \\frac{1}{3N_p})
:param fowlerdata: Convertible to a 3D numpy.ndarray with first axis even
:param ti: Integration time.
:param ts: Time between samples.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of (signal, variance of the signal, number of pixels used
and badpixel mask).
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if ti < 0:
raise ValueError("invalid parameter, ti < 0.0")
if ts < 0:
raise ValueError("invalid parameter, ts < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
fowlerdata = numpy.asarray(fowlerdata)
if fowlerdata.ndim != 3:
raise ValueError('fowlerdata must be 3D')
npairs = fowlerdata.shape[0] // 2
if 2 * npairs != fowlerdata.shape[0]:
raise ValueError('axis-0 in fowlerdata must be even')
# change byteorder
ndtype = fowlerdata.dtype.newbyteorder('=')
fowlerdata = numpy.asarray(fowlerdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(fowlerdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (fowlerdata.shape[1], fowlerdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
raise ValueError('shape of badpixels is not '
'compatible with shape of fowlerdata')
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_fowler_intl(
fowlerdata, ti, ts, gain, ron,
badpixels, saturation, blank,
result, var, npix, mask
)
return result, var, npix, mask | [
"def",
"fowler_array",
"(",
"fowlerdata",
",",
"ti",
"=",
"0.0",
",",
"ts",
"=",
"0.0",
",",
"gain",
"=",
"1.0",
",",
"ron",
"=",
"1.0",
",",
"badpixels",
"=",
"None",
",",
"dtype",
"=",
"'float64'",
",",
"saturation",
"=",
"65631",
",",
"blank",
"=",
"0",
",",
"normalize",
"=",
"False",
")",
":",
"import",
"numina",
".",
"array",
".",
"_nirproc",
"as",
"_nirproc",
"if",
"gain",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, gain <= 0.0\"",
")",
"if",
"ron",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ron < 0.0\"",
")",
"if",
"ti",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ti < 0.0\"",
")",
"if",
"ts",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ts < 0.0\"",
")",
"if",
"saturation",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, saturation <= 0\"",
")",
"fowlerdata",
"=",
"numpy",
".",
"asarray",
"(",
"fowlerdata",
")",
"if",
"fowlerdata",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'fowlerdata must be 3D'",
")",
"npairs",
"=",
"fowlerdata",
".",
"shape",
"[",
"0",
"]",
"//",
"2",
"if",
"2",
"*",
"npairs",
"!=",
"fowlerdata",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'axis-0 in fowlerdata must be even'",
")",
"# change byteorder",
"ndtype",
"=",
"fowlerdata",
".",
"dtype",
".",
"newbyteorder",
"(",
"'='",
")",
"fowlerdata",
"=",
"numpy",
".",
"asarray",
"(",
"fowlerdata",
",",
"dtype",
"=",
"ndtype",
")",
"# type of the output",
"fdtype",
"=",
"numpy",
".",
"result_type",
"(",
"fowlerdata",
".",
"dtype",
",",
"dtype",
")",
"# Type of the mask",
"mdtype",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
"fshape",
"=",
"(",
"fowlerdata",
".",
"shape",
"[",
"1",
"]",
",",
"fowlerdata",
".",
"shape",
"[",
"2",
"]",
")",
"if",
"badpixels",
"is",
"None",
":",
"badpixels",
"=",
"numpy",
".",
"zeros",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"else",
":",
"if",
"badpixels",
".",
"shape",
"!=",
"fshape",
":",
"raise",
"ValueError",
"(",
"'shape of badpixels is not '",
"'compatible with shape of fowlerdata'",
")",
"if",
"badpixels",
".",
"dtype",
"!=",
"mdtype",
":",
"raise",
"ValueError",
"(",
"'dtype of badpixels must be uint8'",
")",
"result",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"fdtype",
")",
"var",
"=",
"numpy",
".",
"empty_like",
"(",
"result",
")",
"npix",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"mask",
"=",
"badpixels",
".",
"copy",
"(",
")",
"_nirproc",
".",
"_process_fowler_intl",
"(",
"fowlerdata",
",",
"ti",
",",
"ts",
",",
"gain",
",",
"ron",
",",
"badpixels",
",",
"saturation",
",",
"blank",
",",
"result",
",",
"var",
",",
"npix",
",",
"mask",
")",
"return",
"result",
",",
"var",
",",
"npix",
",",
"mask"
] | Loop over the first axis applying Fowler processing.
*fowlerdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in Fowler mode (Fowler and Gatley 1991).
The shape of the array must be of the form 2N_p x M x N, with N_p being
the number of pairs in Fowler mode.
The output signal is just the mean value of the differences between the
last N_p values (S_i) and the first N_p values (R_i).
.. math::
S_F = \\frac{1}{N_p}\\sum\\limits_{i=0}^{N_p-1} S_i - R_i
If the source has a radiance F, then the measured signal is equivalent
to:
.. math::
S_F = F T_I - F T_S (N_p -1) = F T_E
being T_I the integration time (*ti*), the time since the first
productive read to the last productive read for a given pixel and T_S the
time between samples (*ts*). T_E is the time between correlated reads
:math:`T_E = T_I - T_S (N_p - 1)`.
The variance of the signal is the sum of two terms, one for the readout
noise:
.. math::
\\mathrm{var}(S_{F1}) =\\frac{2\sigma_R^2}{N_p}
and other for the photon noise:
.. math::
\\mathrm{var}(S_{F2}) = F T_E - F T_S \\frac{1}{3}(N_p-\\frac{1}{N_p})
= F T_I - F T_S (\\frac{4}{3} N_p -1 - \\frac{1}{3N_p})
:param fowlerdata: Convertible to a 3D numpy.ndarray with first axis even
:param ti: Integration time.
:param ts: Time between samples.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of (signal, variance of the signal, number of pixels used
and badpixel mask).
:raises: ValueError | [
"Loop",
"over",
"the",
"first",
"axis",
"applying",
"Fowler",
"processing",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/nirproc.py#L18-L132 | train |
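A minimal usage sketch for fowler_array (the synthetic cube is hypothetical and simply encodes a constant 100-count difference between signal and reset reads; the result depends on the compiled _nirproc extension):

    import numpy as np
    from numina.array.nirproc import fowler_array

    # Two Fowler pairs: the first two planes are the reset reads (R_i),
    # the last two planes the signal reads (S_i).
    cube = np.zeros((4, 2, 2), dtype='uint16')
    cube[2:] = 100
    signal, var, npix, mask = fowler_array(cube, ti=10.0, ts=0.1)
    # signal should be about 100 counts per pixel, npix the pairs used.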
guaix-ucm/numina | numina/array/nirproc.py | ramp_array | def ramp_array(rampdata, ti, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, nsig=None, normalize=False):
"""Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if ti <= 0:
raise ValueError("invalid parameter, ti <= 0.0")
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
rampdata = numpy.asarray(rampdata)
if rampdata.ndim != 3:
raise ValueError('rampdata must be 3D')
# change byteorder
ndtype = rampdata.dtype.newbyteorder('=')
rampdata = numpy.asarray(rampdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(rampdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (rampdata.shape[1], rampdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
msg = 'shape of badpixels is not compatible with shape of rampdata'
raise ValueError(msg)
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_ramp_intl(
rampdata, ti, gain, ron, badpixels,
saturation, blank, result, var, npix, mask
)
return result, var, npix, mask | python | def ramp_array(rampdata, ti, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, nsig=None, normalize=False):
"""Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of a nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if ti <= 0:
raise ValueError("invalid parameter, ti <= 0.0")
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
rampdata = numpy.asarray(rampdata)
if rampdata.ndim != 3:
raise ValueError('rampdata must be 3D')
# change byteorder
ndtype = rampdata.dtype.newbyteorder('=')
rampdata = numpy.asarray(rampdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(rampdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (rampdata.shape[1], rampdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
msg = 'shape of badpixels is not compatible with shape of rampdata'
raise ValueError(msg)
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_ramp_intl(
rampdata, ti, gain, ron, badpixels,
saturation, blank, result, var, npix, mask
)
return result, var, npix, mask | [
"def",
"ramp_array",
"(",
"rampdata",
",",
"ti",
",",
"gain",
"=",
"1.0",
",",
"ron",
"=",
"1.0",
",",
"badpixels",
"=",
"None",
",",
"dtype",
"=",
"'float64'",
",",
"saturation",
"=",
"65631",
",",
"blank",
"=",
"0",
",",
"nsig",
"=",
"None",
",",
"normalize",
"=",
"False",
")",
":",
"import",
"numina",
".",
"array",
".",
"_nirproc",
"as",
"_nirproc",
"if",
"ti",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ti <= 0.0\"",
")",
"if",
"gain",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, gain <= 0.0\"",
")",
"if",
"ron",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ron < 0.0\"",
")",
"if",
"saturation",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, saturation <= 0\"",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
")",
"if",
"rampdata",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'rampdata must be 3D'",
")",
"# change byteorder",
"ndtype",
"=",
"rampdata",
".",
"dtype",
".",
"newbyteorder",
"(",
"'='",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
",",
"dtype",
"=",
"ndtype",
")",
"# type of the output",
"fdtype",
"=",
"numpy",
".",
"result_type",
"(",
"rampdata",
".",
"dtype",
",",
"dtype",
")",
"# Type of the mask",
"mdtype",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
"fshape",
"=",
"(",
"rampdata",
".",
"shape",
"[",
"1",
"]",
",",
"rampdata",
".",
"shape",
"[",
"2",
"]",
")",
"if",
"badpixels",
"is",
"None",
":",
"badpixels",
"=",
"numpy",
".",
"zeros",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"else",
":",
"if",
"badpixels",
".",
"shape",
"!=",
"fshape",
":",
"msg",
"=",
"'shape of badpixels is not compatible with shape of rampdata'",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"badpixels",
".",
"dtype",
"!=",
"mdtype",
":",
"raise",
"ValueError",
"(",
"'dtype of badpixels must be uint8'",
")",
"result",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"fdtype",
")",
"var",
"=",
"numpy",
".",
"empty_like",
"(",
"result",
")",
"npix",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"mask",
"=",
"badpixels",
".",
"copy",
"(",
")",
"_nirproc",
".",
"_process_ramp_intl",
"(",
"rampdata",
",",
"ti",
",",
"gain",
",",
"ron",
",",
"badpixels",
",",
"saturation",
",",
"blank",
",",
"result",
",",
"var",
",",
"npix",
",",
"mask",
")",
"return",
"result",
",",
"var",
",",
"npix",
",",
"mask"
] | Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of an nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError | [
"Loop",
"over",
"the",
"first",
"axis",
"applying",
"ramp",
"processing",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/nirproc.py#L135-L202 | train |
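
A usage sketch for the ramp_array entry above, assuming numina (with its compiled numina.array._nirproc extension) is installed; the synthetic cube below only illustrates the expected N_s x M x N input shape and the four returned arrays.

import numpy
from numina.array.nirproc import ramp_array

# Synthetic follow-up-the-ramp cube: 5 samples of a 4x4 detector,
# with counts growing linearly along the first (sample) axis.
samples = numpy.arange(1, 6, dtype='float64').reshape(5, 1, 1)
cube = numpy.tile(samples * 100.0, (1, 4, 4))

signal, variance, npix, mask = ramp_array(cube, ti=10.0, gain=1.0, ron=2.0)
print(signal.shape, variance.shape, npix.shape, mask.shape)  # all (4, 4)
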
ponty/eagexp | eagexp/cmd.py | accept_freeware_license | def accept_freeware_license():
'''different Eagle versions need different TAB count.
6.5 -> 2
6.6 -> 3
7.4 -> 2
'''
ntab = 3 if version().startswith('6.6.') else 2
for _ in range(ntab):
EasyProcess('xdotool key KP_Tab').call()
time.sleep(0.5)
EasyProcess('xdotool key KP_Space').call()
time.sleep(0.5)
# say OK to any more question
EasyProcess('xdotool key KP_Space').call() | python | def accept_freeware_license():
'''different Eagle versions need different TAB count.
6.5 -> 2
6.6 -> 3
7.4 -> 2
'''
ntab = 3 if version().startswith('6.6.') else 2
for _ in range(ntab):
EasyProcess('xdotool key KP_Tab').call()
time.sleep(0.5)
EasyProcess('xdotool key KP_Space').call()
time.sleep(0.5)
# say OK to any more question
EasyProcess('xdotool key KP_Space').call() | [
"def",
"accept_freeware_license",
"(",
")",
":",
"ntab",
"=",
"3",
"if",
"version",
"(",
")",
".",
"startswith",
"(",
"'6.6.'",
")",
"else",
"2",
"for",
"_",
"in",
"range",
"(",
"ntab",
")",
":",
"EasyProcess",
"(",
"'xdotool key KP_Tab'",
")",
".",
"call",
"(",
")",
"time",
".",
"sleep",
"(",
"0.5",
")",
"EasyProcess",
"(",
"'xdotool key KP_Space'",
")",
".",
"call",
"(",
")",
"time",
".",
"sleep",
"(",
"0.5",
")",
"# say OK to any more question",
"EasyProcess",
"(",
"'xdotool key KP_Space'",
")",
".",
"call",
"(",
")"
] | different Eagle versions need different TAB count.
6.5 -> 2
6.6 -> 3
7.4 -> 2 | [
"different",
"Eagle",
"versions",
"need",
"differnt",
"TAB",
"count",
".",
"6",
".",
"5",
"-",
">",
"2",
"6",
".",
"6",
"-",
">",
"3",
"7",
".",
"4",
"-",
">",
"2"
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/cmd.py#L20-L35 | train |
guaix-ucm/numina | numina/util/fqn.py | fully_qualified_name | def fully_qualified_name(obj, sep='.'):
"""Return fully qualified name from object"""
if inspect.isclass(obj):
return obj.__module__ + sep + obj.__name__
else:
return obj.__module__ + sep + obj.__class__.__name__ | python | def fully_qualified_name(obj, sep='.'):
"""Return fully qualified name from object"""
if inspect.isclass(obj):
return obj.__module__ + sep + obj.__name__
else:
return obj.__module__ + sep + obj.__class__.__name__ | [
"def",
"fully_qualified_name",
"(",
"obj",
",",
"sep",
"=",
"'.'",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"__module__",
"+",
"sep",
"+",
"obj",
".",
"__name__",
"else",
":",
"return",
"obj",
".",
"__module__",
"+",
"sep",
"+",
"obj",
".",
"__class__",
".",
"__name__"
] | Return fully qualified name from object | [
"Return",
"fully",
"qualified",
"name",
"from",
"object"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/fqn.py#L16-L21 | train |
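
A short usage sketch for fully_qualified_name; the Widget class is just a placeholder to show the class/instance branches.

from numina.util.fqn import fully_qualified_name


class Widget(object):
    pass


print(fully_qualified_name(Widget))    # '__main__.Widget' when run as a script
print(fully_qualified_name(Widget()))  # same string, resolved via the instance's class
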
guaix-ucm/numina | numina/array/numsplines.py | fun_residuals | def fun_residuals(params, xnor, ynor, w, bbox, k, ext):
"""Compute fit residuals"""
spl = LSQUnivariateSpline(
x=xnor,
y=ynor,
t=[item.value for item in params.values()],
w=w,
bbox=bbox,
k=k,
ext=ext,
check_finite=False
)
return spl.get_residual() | python | def fun_residuals(params, xnor, ynor, w, bbox, k, ext):
"""Compute fit residuals"""
spl = LSQUnivariateSpline(
x=xnor,
y=ynor,
t=[item.value for item in params.values()],
w=w,
bbox=bbox,
k=k,
ext=ext,
check_finite=False
)
return spl.get_residual() | [
"def",
"fun_residuals",
"(",
"params",
",",
"xnor",
",",
"ynor",
",",
"w",
",",
"bbox",
",",
"k",
",",
"ext",
")",
":",
"spl",
"=",
"LSQUnivariateSpline",
"(",
"x",
"=",
"xnor",
",",
"y",
"=",
"ynor",
",",
"t",
"=",
"[",
"item",
".",
"value",
"for",
"item",
"in",
"params",
".",
"values",
"(",
")",
"]",
",",
"w",
"=",
"w",
",",
"bbox",
"=",
"bbox",
",",
"k",
"=",
"k",
",",
"ext",
"=",
"ext",
",",
"check_finite",
"=",
"False",
")",
"return",
"spl",
".",
"get_residual",
"(",
")"
] | Compute fit residuals | [
"Compute",
"fit",
"residuals"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/numsplines.py#L17-L30 | train |
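
fun_residuals iterates params as objects exposing a .value attribute (an lmfit-style Parameters container would fit). The sketch below uses a tiny stand-in class for those parameter objects, since the caller's container is not shown in this row; the knot positions and data are made up.

import numpy
from numina.array.numsplines import fun_residuals


class Knot(object):
    # Minimal stand-in for a parameter object with a .value attribute.
    def __init__(self, value):
        self.value = value


xnor = numpy.linspace(0.0, 1.0, 50)
ynor = numpy.sin(2 * numpy.pi * xnor)
params = {'t1': Knot(0.33), 't2': Knot(0.66)}  # two interior knots, in increasing order

resid = fun_residuals(params, xnor, ynor, w=None, bbox=[None, None], k=3, ext=0)
print(resid)  # weighted sum of squared residuals of the LSQ spline fit
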
guaix-ucm/numina | numina/user/clirundal.py | parse_as_yaml | def parse_as_yaml(strdict):
"""Parse a dictionary of strings as if yaml reads it"""
interm = ""
for key, val in strdict.items():
interm = "%s: %s, %s" % (key, val, interm)
fin = '{%s}' % interm
return yaml.load(fin) | python | def parse_as_yaml(strdict):
"""Parse a dictionary of strings as if yaml reads it"""
interm = ""
for key, val in strdict.items():
interm = "%s: %s, %s" % (key, val, interm)
fin = '{%s}' % interm
return yaml.load(fin) | [
"def",
"parse_as_yaml",
"(",
"strdict",
")",
":",
"interm",
"=",
"\"\"",
"for",
"key",
",",
"val",
"in",
"strdict",
".",
"items",
"(",
")",
":",
"interm",
"=",
"\"%s: %s, %s\"",
"%",
"(",
"key",
",",
"val",
",",
"interm",
")",
"fin",
"=",
"'{%s}'",
"%",
"interm",
"return",
"yaml",
".",
"load",
"(",
"fin",
")"
] | Parse a dictionary of strings as if yaml reads it | [
"Parse",
"a",
"dictionary",
"of",
"strings",
"as",
"if",
"yaml",
"reads",
"it"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/clirundal.py#L245-L252 | train |
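
A quick illustration of parse_as_yaml: the helper joins the key/value pairs into a YAML flow mapping, so numeric-looking values come back as numbers (the parameter names are arbitrary).

from numina.user.clirundal import parse_as_yaml

overrides = {'exposure': '30.5', 'mode': 'bias', 'repeat': '3'}
print(parse_as_yaml(overrides))
# -> {'exposure': 30.5, 'mode': 'bias', 'repeat': 3}  (key order may vary)
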
SUNCAT-Center/CatHub | cathub/cli.py | folder2db | def folder2db(folder_name, debug, energy_limit, skip_folders,
goto_reaction):
"""Read folder and collect data in local sqlite3 database"""
folder_name = folder_name.rstrip('/')
skip = []
for s in skip_folders.split(', '):
for sk in s.split(','):
skip.append(sk)
pub_id = _folder2db.main(folder_name, debug, energy_limit,
skip, goto_reaction)
if pub_id:
print('')
print('')
print('Ready to release the data?')
print(
" Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.".format(**locals()))
print(" Then log in at www.catalysis-hub.org/upload/ to verify and release. ") | python | def folder2db(folder_name, debug, energy_limit, skip_folders,
goto_reaction):
"""Read folder and collect data in local sqlite3 database"""
folder_name = folder_name.rstrip('/')
skip = []
for s in skip_folders.split(', '):
for sk in s.split(','):
skip.append(sk)
pub_id = _folder2db.main(folder_name, debug, energy_limit,
skip, goto_reaction)
if pub_id:
print('')
print('')
print('Ready to release the data?')
print(
" Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.".format(**locals()))
print(" Then log in at www.catalysis-hub.org/upload/ to verify and release. ") | [
"def",
"folder2db",
"(",
"folder_name",
",",
"debug",
",",
"energy_limit",
",",
"skip_folders",
",",
"goto_reaction",
")",
":",
"folder_name",
"=",
"folder_name",
".",
"rstrip",
"(",
"'/'",
")",
"skip",
"=",
"[",
"]",
"for",
"s",
"in",
"skip_folders",
".",
"split",
"(",
"', '",
")",
":",
"for",
"sk",
"in",
"s",
".",
"split",
"(",
"','",
")",
":",
"skip",
".",
"append",
"(",
"sk",
")",
"pub_id",
"=",
"_folder2db",
".",
"main",
"(",
"folder_name",
",",
"debug",
",",
"energy_limit",
",",
"skip",
",",
"goto_reaction",
")",
"if",
"pub_id",
":",
"print",
"(",
"''",
")",
"print",
"(",
"''",
")",
"print",
"(",
"'Ready to release the data?'",
")",
"print",
"(",
"\" Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'.\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"print",
"(",
"\" Then log in at www.catalysis-hub.org/upload/ to verify and release. \"",
")"
] | Read folder and collect data in local sqlite3 database | [
"Read",
"folder",
"and",
"collect",
"data",
"in",
"local",
"sqlite3",
"database"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L82-L99 | train |
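
The skip_folders option above accepts both comma- and comma-plus-space-separated lists; a standalone sketch of that normalization, with a made-up input string:

def split_skip_folders(skip_folders):
    # Mirrors the loop in folder2db: split on ', ' first, then on ','.
    skip = []
    for s in skip_folders.split(', '):
        for sk in s.split(','):
            skip.append(sk)
    return skip


print(split_skip_folders('old_runs, broken,tests'))  # ['old_runs', 'broken', 'tests']
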
SUNCAT-Center/CatHub | cathub/cli.py | db2server | def db2server(dbfile, block_size, dbuser, dbpassword):
"""Transfer data from local database to Catalysis Hub server"""
_db2server.main(dbfile,
write_reaction=True,
write_ase=True,
write_publication=True,
write_reaction_system=True,
block_size=block_size,
start_block=0,
user=dbuser,
password=dbpassword) | python | def db2server(dbfile, block_size, dbuser, dbpassword):
"""Transfer data from local database to Catalysis Hub server"""
_db2server.main(dbfile,
write_reaction=True,
write_ase=True,
write_publication=True,
write_reaction_system=True,
block_size=block_size,
start_block=0,
user=dbuser,
password=dbpassword) | [
"def",
"db2server",
"(",
"dbfile",
",",
"block_size",
",",
"dbuser",
",",
"dbpassword",
")",
":",
"_db2server",
".",
"main",
"(",
"dbfile",
",",
"write_reaction",
"=",
"True",
",",
"write_ase",
"=",
"True",
",",
"write_publication",
"=",
"True",
",",
"write_reaction_system",
"=",
"True",
",",
"block_size",
"=",
"block_size",
",",
"start_block",
"=",
"0",
",",
"user",
"=",
"dbuser",
",",
"password",
"=",
"dbpassword",
")"
] | Transfer data from local database to Catalysis Hub server | [
"Transfer",
"data",
"from",
"local",
"database",
"to",
"Catalysis",
"Hub",
"server"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L109-L120 | train |
SUNCAT-Center/CatHub | cathub/cli.py | reactions | def reactions(columns, n_results, write_db, queries):
"""Search for reactions"""
if not isinstance(queries, dict):
query_dict = {}
for q in queries:
key, value = q.split('=')
if key == 'distinct':
if value in ['True', 'true']:
query_dict.update({key: True})
continue
try:
value = int(value)
query_dict.update({key: value})
except BaseException:
query_dict.update({key: '{0}'.format(value)})
# Keep {0} in string.format for python2.6 compatibility
if write_db and n_results > 1000:
print("""Warning: You're attempting to write more than a 1000 rows
with geometries. This could take some time""")
data = query.get_reactions(columns=columns,
n_results=n_results,
write_db=write_db,
**query_dict)
if write_db:
return
table = []
headers = []
for row in data['reactions']['edges']:
table += [list(row['node'].values())]
headers = list(row['node'].keys())
print(tabulate(table, headers) + '\n') | python | def reactions(columns, n_results, write_db, queries):
"""Search for reactions"""
if not isinstance(queries, dict):
query_dict = {}
for q in queries:
key, value = q.split('=')
if key == 'distinct':
if value in ['True', 'true']:
query_dict.update({key: True})
continue
try:
value = int(value)
query_dict.update({key: value})
except BaseException:
query_dict.update({key: '{0}'.format(value)})
# Keep {0} in string.format for python2.6 compatibility
if write_db and n_results > 1000:
print("""Warning: You're attempting to write more than a 1000 rows
with geometries. This could take some time""")
data = query.get_reactions(columns=columns,
n_results=n_results,
write_db=write_db,
**query_dict)
if write_db:
return
table = []
headers = []
for row in data['reactions']['edges']:
table += [list(row['node'].values())]
headers = list(row['node'].keys())
print(tabulate(table, headers) + '\n') | [
"def",
"reactions",
"(",
"columns",
",",
"n_results",
",",
"write_db",
",",
"queries",
")",
":",
"if",
"not",
"isinstance",
"(",
"queries",
",",
"dict",
")",
":",
"query_dict",
"=",
"{",
"}",
"for",
"q",
"in",
"queries",
":",
"key",
",",
"value",
"=",
"q",
".",
"split",
"(",
"'='",
")",
"if",
"key",
"==",
"'distinct'",
":",
"if",
"value",
"in",
"[",
"'True'",
",",
"'true'",
"]",
":",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"True",
"}",
")",
"continue",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"value",
"}",
")",
"except",
"BaseException",
":",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"'{0}'",
".",
"format",
"(",
"value",
")",
"}",
")",
"# Keep {0} in string.format for python2.6 compatibility",
"if",
"write_db",
"and",
"n_results",
">",
"1000",
":",
"print",
"(",
"\"\"\"Warning: You're attempting to write more than a 1000 rows\n with geometries. This could take some time\"\"\"",
")",
"data",
"=",
"query",
".",
"get_reactions",
"(",
"columns",
"=",
"columns",
",",
"n_results",
"=",
"n_results",
",",
"write_db",
"=",
"write_db",
",",
"*",
"*",
"query_dict",
")",
"if",
"write_db",
":",
"return",
"table",
"=",
"[",
"]",
"headers",
"=",
"[",
"]",
"for",
"row",
"in",
"data",
"[",
"'reactions'",
"]",
"[",
"'edges'",
"]",
":",
"table",
"+=",
"[",
"list",
"(",
"row",
"[",
"'node'",
"]",
".",
"values",
"(",
")",
")",
"]",
"headers",
"=",
"list",
"(",
"row",
"[",
"'node'",
"]",
".",
"keys",
"(",
")",
")",
"print",
"(",
"tabulate",
"(",
"table",
",",
"headers",
")",
"+",
"'\\n'",
")"
] | Search for reactions | [
"Search",
"for",
"reactions"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L171-L204 | train |
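
The reactions command converts key=value strings from the command line into a typed query dictionary; pulled out on its own, that conversion looks roughly like this (the query keys are only examples):

def build_query_dict(queries):
    # Integers stay integers, 'distinct=True' becomes a boolean flag,
    # anything else is kept as a string - much as in the command above.
    query_dict = {}
    for q in queries:
        key, value = q.split('=')
        if key == 'distinct' and value in ['True', 'true']:
            query_dict[key] = True
            continue
        try:
            query_dict[key] = int(value)
        except ValueError:
            query_dict[key] = '{0}'.format(value)
    return query_dict


print(build_query_dict(['chemicalComposition=Pt', 'first=5', 'distinct=True']))
# -> {'chemicalComposition': 'Pt', 'first': 5, 'distinct': True}
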
SUNCAT-Center/CatHub | cathub/cli.py | publications | def publications(columns, n_results, queries):
"""Search for publications"""
if not isinstance(queries, dict):
query_dict = {}
for q in queries:
key, value = q.split('=')
if key == 'distinct':
if value in ['True', 'true']:
query_dict.update({key: True})
continue
try:
value = int(value)
query_dict.update({key: value})
except BaseException:
query_dict.update({key: '{0}'.format(value)})
if 'sort' not in query_dict:
query_dict.update({'order': '-year'})
data = query.query(table='publications',
columns=columns,
n_results=n_results,
queries=query_dict)
table = []
headers = []
for row in data['publications']['edges']:
value = list(row['node'].values())
for n, v in enumerate(value):
if isinstance(v, str) and len(v) > 20:
splited = v.split(' ')
size = 0
sentence = ''
for word in splited:
if size < 20:
size += len(word)
sentence += ' ' + word
else:
sentence += '\n' + word
size = 0
sentence += '\n'
value[n] = sentence
table += [value]
headers = list(row['node'].keys())
print(tabulate(table, headers, tablefmt="grid") + '\n') | python | def publications(columns, n_results, queries):
"""Search for publications"""
if not isinstance(queries, dict):
query_dict = {}
for q in queries:
key, value = q.split('=')
if key == 'distinct':
if value in ['True', 'true']:
query_dict.update({key: True})
continue
try:
value = int(value)
query_dict.update({key: value})
except BaseException:
query_dict.update({key: '{0}'.format(value)})
if 'sort' not in query_dict:
query_dict.update({'order': '-year'})
data = query.query(table='publications',
columns=columns,
n_results=n_results,
queries=query_dict)
table = []
headers = []
for row in data['publications']['edges']:
value = list(row['node'].values())
for n, v in enumerate(value):
if isinstance(v, str) and len(v) > 20:
splited = v.split(' ')
size = 0
sentence = ''
for word in splited:
if size < 20:
size += len(word)
sentence += ' ' + word
else:
sentence += '\n' + word
size = 0
sentence += '\n'
value[n] = sentence
table += [value]
headers = list(row['node'].keys())
print(tabulate(table, headers, tablefmt="grid") + '\n') | [
"def",
"publications",
"(",
"columns",
",",
"n_results",
",",
"queries",
")",
":",
"if",
"not",
"isinstance",
"(",
"queries",
",",
"dict",
")",
":",
"query_dict",
"=",
"{",
"}",
"for",
"q",
"in",
"queries",
":",
"key",
",",
"value",
"=",
"q",
".",
"split",
"(",
"'='",
")",
"if",
"key",
"==",
"'distinct'",
":",
"if",
"value",
"in",
"[",
"'True'",
",",
"'true'",
"]",
":",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"True",
"}",
")",
"continue",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"value",
"}",
")",
"except",
"BaseException",
":",
"query_dict",
".",
"update",
"(",
"{",
"key",
":",
"'{0}'",
".",
"format",
"(",
"value",
")",
"}",
")",
"if",
"'sort'",
"not",
"in",
"query_dict",
":",
"query_dict",
".",
"update",
"(",
"{",
"'order'",
":",
"'-year'",
"}",
")",
"data",
"=",
"query",
".",
"query",
"(",
"table",
"=",
"'publications'",
",",
"columns",
"=",
"columns",
",",
"n_results",
"=",
"n_results",
",",
"queries",
"=",
"query_dict",
")",
"table",
"=",
"[",
"]",
"headers",
"=",
"[",
"]",
"for",
"row",
"in",
"data",
"[",
"'publications'",
"]",
"[",
"'edges'",
"]",
":",
"value",
"=",
"list",
"(",
"row",
"[",
"'node'",
"]",
".",
"values",
"(",
")",
")",
"for",
"n",
",",
"v",
"in",
"enumerate",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"len",
"(",
"v",
")",
">",
"20",
":",
"splited",
"=",
"v",
".",
"split",
"(",
"' '",
")",
"size",
"=",
"0",
"sentence",
"=",
"''",
"for",
"word",
"in",
"splited",
":",
"if",
"size",
"<",
"20",
":",
"size",
"+=",
"len",
"(",
"word",
")",
"sentence",
"+=",
"' '",
"+",
"word",
"else",
":",
"sentence",
"+=",
"'\\n'",
"+",
"word",
"size",
"=",
"0",
"sentence",
"+=",
"'\\n'",
"value",
"[",
"n",
"]",
"=",
"sentence",
"table",
"+=",
"[",
"value",
"]",
"headers",
"=",
"list",
"(",
"row",
"[",
"'node'",
"]",
".",
"keys",
"(",
")",
")",
"print",
"(",
"tabulate",
"(",
"table",
",",
"headers",
",",
"tablefmt",
"=",
"\"grid\"",
")",
"+",
"'\\n'",
")"
] | Search for publications | [
"Search",
"for",
"publications"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L223-L266 | train |
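
Long fields in the publications table are soft-wrapped roughly every 20 characters before tabulation; the wrapping step, extracted as a helper with a made-up title:

def soft_wrap(text, width=20):
    # Greedy word wrap, as used when formatting long publication fields.
    size = 0
    wrapped = ''
    for word in text.split(' '):
        if size < width:
            size += len(word)
            wrapped += ' ' + word
        else:
            wrapped += '\n' + word
            size = 0
    return wrapped + '\n'


print(soft_wrap('Trends in catalytic activity across transition metal surfaces'))
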
SUNCAT-Center/CatHub | cathub/cli.py | make_folders | def make_folders(template, custom_base):
"""Create a basic folder tree for dumping DFT calculcations for reaction energies.
Dear all
Use this command to make the right structure for your folders
for submitting data for Catalysis Hub's Surface Reactions.
Start by creating a template file by calling:
$ cathub make_folders <template_name>
Then open the template and modify it to so that it contains information
about your data. You will need to enter publication/dataset information,
and specify the types of surfaces, facets and reactions.
The 'reactions' entry should include two lists for each reaction;
'reactants' and 'products', corresponding to left- and right hand side of
each chemical equation respectively.
Remember to balance the equation by including a prefactor or minus sign
in the name when relevant. For example:
reactions:
- reactants: ['CCH3star@ontop']
products: ['Cstar@hollow', 'CH3star@ontop']
- reactants: ['CH4gas', '-0.5H2gas', 'star']
products: ['CH3star']
Please include the phase of the species as an extension:
'gas' for gas phase (i.e. CH4 -> CH4gas)
'star' for empty slab or adsorbed phase. (i.e. OH -> OHstar)
The site of adsorbed species is also included as an extension:
'@site' (i.e. OHstar in bridge-> OHstar@bridge)
Energy corrections to gas phase molecules can be included as:
energy_corrections: {H2: 0.1, CH4: -0.15}
Then, save the template and call:
$ cathub make_folders <template_name>
And folders will be created automatically.
You can create several templates and call make_folders again
if you, for example, are using different functionals or are
doing different reactions on different surfaces.
After creating your folders, add your output files from the
electronic structure calculations at the positions.
Accepted file formats include everything that can be read by ASE
and contains the total potential energy of the calculation, such
as .traj or .OUTCAR files.
After dumping your files, run `cathub folder2db <your folder>`
to collect the data.
"""
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
Dumper.add_representer(collections.OrderedDict, dict_representer)
if custom_base is None:
custom_base = os.path.abspath(os.path.curdir)
template = custom_base + '/' + template
template_data = ase_tools.REACTION_TEMPLATE
if not os.path.exists(template):
with open(template, 'w') as outfile:
outfile.write(
yaml.dump(
template_data,
indent=4,
Dumper=Dumper) +
'\n')
print("Created template file: {template}\n".format(**locals()) +
' Please edit it and run the script again to create your folderstructure.\n' +
' Run cathub make_folders --help for instructions')
return
with open(template) as infile:
template_data = yaml.load(infile)
title = template_data['title']
authors = template_data['authors']
journal = template_data['journal']
volume = template_data['volume']
number = template_data['number']
pages = template_data['pages']
year = template_data['year']
email = template_data['email']
publisher = template_data['publisher']
doi = template_data['doi']
dft_code = template_data['DFT_code']
dft_functionals = template_data['DFT_functionals']
reactions = template_data['reactions']
crystal_structures = template_data['crystal_structures']
bulk_compositions = template_data['bulk_compositions']
facets = template_data['facets']
energy_corrections = template_data['energy_corrections']
make_folders_template.main(
title=title,
authors=eval(authors) if isinstance(
authors, six.string_types) else authors,
journal=journal,
volume=volume,
number=number,
pages=pages,
year=year,
email=email,
publisher=publisher,
doi=doi,
DFT_code=dft_code,
DFT_functionals=dft_functionals,
reactions=eval(reactions) if isinstance(
reactions, six.string_types) else reactions,
custom_base=custom_base,
bulk_compositions=bulk_compositions,
crystal_structures=crystal_structures,
facets=facets,
energy_corrections=energy_corrections
)
pub_id = tools.get_pub_id(title, authors, year)
print(
"Now dump your DFT output files into the folder, and run 'cathub folder2db {pub_id}'".format(**locals())) | python | def make_folders(template, custom_base):
"""Create a basic folder tree for dumping DFT calculcations for reaction energies.
Dear all
Use this command to make the right structure for your folders
for submitting data for Catalysis Hub's Surface Reactions.
Start by creating a template file by calling:
$ cathub make_folders <template_name>
Then open the template and modify it to so that it contains information
about your data. You will need to enter publication/dataset information,
and specify the types of surfaces, facets and reactions.
The 'reactions' entry should include two lists for each reaction;
'reactants' and 'products', corresponding to left- and right hand side of
each chemical equation respectively.
Remember to balance the equation by including a prefactor or minus sign
in the name when relevant. For example:
reactions:
- reactants: ['CCH3star@ontop']
products: ['Cstar@hollow', 'CH3star@ontop']
- reactants: ['CH4gas', '-0.5H2gas', 'star']
products: ['CH3star']
Please include the phase of the species as an extension:
'gas' for gas phase (i.e. CH4 -> CH4gas)
'star' for empty slab or adsorbed phase. (i.e. OH -> OHstar)
The site of adsorbed species is also included as an extension:
'@site' (i.e. OHstar in bridge-> OHstar@bridge)
Energy corrections to gas phase molecules can be included as:
energy_corrections: {H2: 0.1, CH4: -0.15}
Then, save the template and call:
$ cathub make_folders <template_name>
And folders will be created automatically.
You can create several templates and call make_folders again
if you, for example, are using different functionals or are
doing different reactions on different surfaces.
After creating your folders, add your output files from the
electronic structure calculations at the positions.
Accepted file formats include everything that can be read by ASE
and contains the total potential energy of the calculation, such
as .traj or .OUTCAR files.
After dumping your files, run `cathub folder2db <your folder>`
to collect the data.
"""
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
Dumper.add_representer(collections.OrderedDict, dict_representer)
if custom_base is None:
custom_base = os.path.abspath(os.path.curdir)
template = custom_base + '/' + template
template_data = ase_tools.REACTION_TEMPLATE
if not os.path.exists(template):
with open(template, 'w') as outfile:
outfile.write(
yaml.dump(
template_data,
indent=4,
Dumper=Dumper) +
'\n')
print("Created template file: {template}\n".format(**locals()) +
' Please edit it and run the script again to create your folderstructure.\n' +
' Run cathub make_folders --help for instructions')
return
with open(template) as infile:
template_data = yaml.load(infile)
title = template_data['title']
authors = template_data['authors']
journal = template_data['journal']
volume = template_data['volume']
number = template_data['number']
pages = template_data['pages']
year = template_data['year']
email = template_data['email']
publisher = template_data['publisher']
doi = template_data['doi']
dft_code = template_data['DFT_code']
dft_functionals = template_data['DFT_functionals']
reactions = template_data['reactions']
crystal_structures = template_data['crystal_structures']
bulk_compositions = template_data['bulk_compositions']
facets = template_data['facets']
energy_corrections = template_data['energy_corrections']
make_folders_template.main(
title=title,
authors=eval(authors) if isinstance(
authors, six.string_types) else authors,
journal=journal,
volume=volume,
number=number,
pages=pages,
year=year,
email=email,
publisher=publisher,
doi=doi,
DFT_code=dft_code,
DFT_functionals=dft_functionals,
reactions=eval(reactions) if isinstance(
reactions, six.string_types) else reactions,
custom_base=custom_base,
bulk_compositions=bulk_compositions,
crystal_structures=crystal_structures,
facets=facets,
energy_corrections=energy_corrections
)
pub_id = tools.get_pub_id(title, authors, year)
print(
"Now dump your DFT output files into the folder, and run 'cathub folder2db {pub_id}'".format(**locals())) | [
"def",
"make_folders",
"(",
"template",
",",
"custom_base",
")",
":",
"def",
"dict_representer",
"(",
"dumper",
",",
"data",
")",
":",
"return",
"dumper",
".",
"represent_dict",
"(",
"data",
".",
"items",
"(",
")",
")",
"Dumper",
".",
"add_representer",
"(",
"collections",
".",
"OrderedDict",
",",
"dict_representer",
")",
"if",
"custom_base",
"is",
"None",
":",
"custom_base",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"curdir",
")",
"template",
"=",
"custom_base",
"+",
"'/'",
"+",
"template",
"template_data",
"=",
"ase_tools",
".",
"REACTION_TEMPLATE",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"template",
")",
":",
"with",
"open",
"(",
"template",
",",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"yaml",
".",
"dump",
"(",
"template_data",
",",
"indent",
"=",
"4",
",",
"Dumper",
"=",
"Dumper",
")",
"+",
"'\\n'",
")",
"print",
"(",
"\"Created template file: {template}\\n\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"+",
"' Please edit it and run the script again to create your folderstructure.\\n'",
"+",
"' Run cathub make_folders --help for instructions'",
")",
"return",
"with",
"open",
"(",
"template",
")",
"as",
"infile",
":",
"template_data",
"=",
"yaml",
".",
"load",
"(",
"infile",
")",
"title",
"=",
"template_data",
"[",
"'title'",
"]",
"authors",
"=",
"template_data",
"[",
"'authors'",
"]",
"journal",
"=",
"template_data",
"[",
"'journal'",
"]",
"volume",
"=",
"template_data",
"[",
"'volume'",
"]",
"number",
"=",
"template_data",
"[",
"'number'",
"]",
"pages",
"=",
"template_data",
"[",
"'pages'",
"]",
"year",
"=",
"template_data",
"[",
"'year'",
"]",
"email",
"=",
"template_data",
"[",
"'email'",
"]",
"publisher",
"=",
"template_data",
"[",
"'publisher'",
"]",
"doi",
"=",
"template_data",
"[",
"'doi'",
"]",
"dft_code",
"=",
"template_data",
"[",
"'DFT_code'",
"]",
"dft_functionals",
"=",
"template_data",
"[",
"'DFT_functionals'",
"]",
"reactions",
"=",
"template_data",
"[",
"'reactions'",
"]",
"crystal_structures",
"=",
"template_data",
"[",
"'crystal_structures'",
"]",
"bulk_compositions",
"=",
"template_data",
"[",
"'bulk_compositions'",
"]",
"facets",
"=",
"template_data",
"[",
"'facets'",
"]",
"energy_corrections",
"=",
"template_data",
"[",
"'energy_corrections'",
"]",
"make_folders_template",
".",
"main",
"(",
"title",
"=",
"title",
",",
"authors",
"=",
"eval",
"(",
"authors",
")",
"if",
"isinstance",
"(",
"authors",
",",
"six",
".",
"string_types",
")",
"else",
"authors",
",",
"journal",
"=",
"journal",
",",
"volume",
"=",
"volume",
",",
"number",
"=",
"number",
",",
"pages",
"=",
"pages",
",",
"year",
"=",
"year",
",",
"email",
"=",
"email",
",",
"publisher",
"=",
"publisher",
",",
"doi",
"=",
"doi",
",",
"DFT_code",
"=",
"dft_code",
",",
"DFT_functionals",
"=",
"dft_functionals",
",",
"reactions",
"=",
"eval",
"(",
"reactions",
")",
"if",
"isinstance",
"(",
"reactions",
",",
"six",
".",
"string_types",
")",
"else",
"reactions",
",",
"custom_base",
"=",
"custom_base",
",",
"bulk_compositions",
"=",
"bulk_compositions",
",",
"crystal_structures",
"=",
"crystal_structures",
",",
"facets",
"=",
"facets",
",",
"energy_corrections",
"=",
"energy_corrections",
")",
"pub_id",
"=",
"tools",
".",
"get_pub_id",
"(",
"title",
",",
"authors",
",",
"year",
")",
"print",
"(",
"\"Now dump your DFT output files into the folder, and run 'cathub folder2db {pub_id}'\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")"
] | Create a basic folder tree for dumping DFT calculations for reaction energies.
Dear all
Use this command to make the right structure for your folders
for submitting data for Catalysis Hub's Surface Reactions.
Start by creating a template file by calling:
$ cathub make_folders <template_name>
Then open the template and modify it to so that it contains information
about your data. You will need to enter publication/dataset information,
and specify the types of surfaces, facets and reactions.
The 'reactions' entry should include two lists for each reaction;
'reactants' and 'products', corresponding to left- and right hand side of
each chemical equation respectively.
Remember to balance the equation by including a prefactor or minus sign
in the name when relevant. For example:
reactions:
- reactants: ['CCH3star@ontop']
products: ['Cstar@hollow', 'CH3star@ontop']
- reactants: ['CH4gas', '-0.5H2gas', 'star']
products: ['CH3star']
Please include the phase of the species as an extension:
'gas' for gas phase (i.e. CH4 -> CH4gas)
'star' for empty slab or adsorbed phase. (i.e. OH -> OHstar)
The site of adsorbed species is also included as an extension:
'@site' (i.e. OHstar in bridge-> OHstar@bridge)
Energy corrections to gas phase molecules can be included as:
energy_corrections: {H2: 0.1, CH4: -0.15}
Then, save the template and call:
$ cathub make_folders <template_name>
And folders will be created automatically.
You can create several templates and call make_folders again
if you, for example, are using different functionals or are
doing different reactions on different surfaces.
After creating your folders, add your output files from the
electronic structure calculations at the positions.
Accepted file formats include everything that can be read by ASE
and contains the total potential energy of the calculation, such
as .traj or .OUTCAR files.
After dumping your files, run `cathub folder2db <your folder>`
to collect the data. | [
"Create",
"a",
"basic",
"folder",
"tree",
"for",
"dumping",
"DFT",
"calculcations",
"for",
"reaction",
"energies",
"."
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L274-L408 | train |
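
Writing the template relies on registering a representer so that OrderedDict entries keep their order in the YAML output; a minimal standalone sketch of that mechanism (the template fields are abbreviated):

import collections

import yaml
from yaml import Dumper


def dict_representer(dumper, data):
    # Emit OrderedDict as a plain YAML mapping, preserving key order.
    return dumper.represent_dict(data.items())


Dumper.add_representer(collections.OrderedDict, dict_representer)

template = collections.OrderedDict([('title', 'Fancy Title'),
                                    ('year', 2019),
                                    ('DFT_code', 'Quantum ESPRESSO')])
print(yaml.dump(template, indent=4, Dumper=Dumper))
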
SUNCAT-Center/CatHub | cathub/cli.py | organize | def organize(**kwargs):
"""Read reactions from non-organized folder"""
# do argument wrangling before turning it into an object
# since namedtuples are immutable
if len(kwargs['adsorbates']) == 0:
print("""Warning: no adsorbates specified,
can't pick up reaction reaction energies.""")
print(" Enter adsorbates like so --adsorbates CO,O,CO2")
print(" [Comma-separated list without spaces.]")
kwargs['adsorbates'] = list(map(
lambda x: (''.join(sorted(string2symbols(x)))),
kwargs['adsorbates'].split(','),
))
if kwargs['energy_corrections']:
e_c_dict = {}
for e_c in kwargs['energy_corrections'].split(','):
key, value = e_c.split('=')
e_c_dict.update({key: float(value)})
kwargs['energy_corrections'] = e_c_dict
options = collections.namedtuple(
'options',
kwargs.keys()
)(**kwargs)
_organize.main(options=options) | python | def organize(**kwargs):
"""Read reactions from non-organized folder"""
# do argument wrangling before turning it into an object
# since namedtuples are immutable
if len(kwargs['adsorbates']) == 0:
print("""Warning: no adsorbates specified,
can't pick up reaction reaction energies.""")
print(" Enter adsorbates like so --adsorbates CO,O,CO2")
print(" [Comma-separated list without spaces.]")
kwargs['adsorbates'] = list(map(
lambda x: (''.join(sorted(string2symbols(x)))),
kwargs['adsorbates'].split(','),
))
if kwargs['energy_corrections']:
e_c_dict = {}
for e_c in kwargs['energy_corrections'].split(','):
key, value = e_c.split('=')
e_c_dict.update({key: float(value)})
kwargs['energy_corrections'] = e_c_dict
options = collections.namedtuple(
'options',
kwargs.keys()
)(**kwargs)
_organize.main(options=options) | [
"def",
"organize",
"(",
"*",
"*",
"kwargs",
")",
":",
"# do argument wrangling before turning it into an obect",
"# since namedtuples are immutable",
"if",
"len",
"(",
"kwargs",
"[",
"'adsorbates'",
"]",
")",
"==",
"0",
":",
"print",
"(",
"\"\"\"Warning: no adsorbates specified,\n can't pick up reaction reaction energies.\"\"\"",
")",
"print",
"(",
"\" Enter adsorbates like so --adsorbates CO,O,CO2\"",
")",
"print",
"(",
"\" [Comma-separated list without spaces.]\"",
")",
"kwargs",
"[",
"'adsorbates'",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"(",
"''",
".",
"join",
"(",
"sorted",
"(",
"string2symbols",
"(",
"x",
")",
")",
")",
")",
",",
"kwargs",
"[",
"'adsorbates'",
"]",
".",
"split",
"(",
"','",
")",
",",
")",
")",
"if",
"kwargs",
"[",
"'energy_corrections'",
"]",
":",
"e_c_dict",
"=",
"{",
"}",
"for",
"e_c",
"in",
"kwargs",
"[",
"'energy_corrections'",
"]",
".",
"split",
"(",
"','",
")",
":",
"key",
",",
"value",
"=",
"e_c",
".",
"split",
"(",
"'='",
")",
"e_c_dict",
".",
"update",
"(",
"{",
"key",
":",
"float",
"(",
"value",
")",
"}",
")",
"kwargs",
"[",
"'energy_corrections'",
"]",
"=",
"e_c_dict",
"options",
"=",
"collections",
".",
"namedtuple",
"(",
"'options'",
",",
"kwargs",
".",
"keys",
"(",
")",
")",
"(",
"*",
"*",
"kwargs",
")",
"_organize",
".",
"main",
"(",
"options",
"=",
"options",
")"
] | Read reactions from non-organized folder | [
"Read",
"reactions",
"from",
"non",
"-",
"organized",
"folder"
] | 324625d1d8e740673f139658b2de4c9e1059739e | https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/cli.py#L551-L576 | train |
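
The adsorbate handling above canonicalizes each formula by sorting its element symbols; a short sketch using ASE's string2symbols (the import path may differ between ASE versions, and the adsorbate list is only an example):

from ase.symbols import string2symbols

adsorbates = 'CO,O,CO2'
normalized = list(map(
    lambda x: ''.join(sorted(string2symbols(x))),
    adsorbates.split(','),
))
print(normalized)  # ['CO', 'O', 'COO']
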
iiSeymour/game-of-life | conway/gol.py | gol.initCurses | def initCurses(self):
"""
Set up screen properties
"""
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_MAGENTA, -1)
curses.init_pair(4, curses.COLOR_CYAN, -1)
curses.init_pair(5, curses.COLOR_GREEN, -1)
curses.init_pair(6, curses.COLOR_BLUE, -1)
curses.init_pair(7, curses.COLOR_RED, -1) | python | def initCurses(self):
"""
Set up screen properties
"""
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_MAGENTA, -1)
curses.init_pair(4, curses.COLOR_CYAN, -1)
curses.init_pair(5, curses.COLOR_GREEN, -1)
curses.init_pair(6, curses.COLOR_BLUE, -1)
curses.init_pair(7, curses.COLOR_RED, -1) | [
"def",
"initCurses",
"(",
"self",
")",
":",
"curses",
".",
"noecho",
"(",
")",
"curses",
".",
"cbreak",
"(",
")",
"curses",
".",
"curs_set",
"(",
"0",
")",
"curses",
".",
"start_color",
"(",
")",
"curses",
".",
"use_default_colors",
"(",
")",
"curses",
".",
"init_pair",
"(",
"1",
",",
"curses",
".",
"COLOR_WHITE",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"2",
",",
"curses",
".",
"COLOR_YELLOW",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"3",
",",
"curses",
".",
"COLOR_MAGENTA",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"4",
",",
"curses",
".",
"COLOR_CYAN",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"5",
",",
"curses",
".",
"COLOR_GREEN",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"6",
",",
"curses",
".",
"COLOR_BLUE",
",",
"-",
"1",
")",
"curses",
".",
"init_pair",
"(",
"7",
",",
"curses",
".",
"COLOR_RED",
",",
"-",
"1",
")"
] | Set up screen properties | [
"Set",
"up",
"screen",
"properties"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L81-L96 | train |
iiSeymour/game-of-life | conway/gol.py | gol.patchCurses | def patchCurses(self):
"""
Fix curses addch function for python 3.4.0
"""
if (sys.version_info)[:3] == (3, 4, 0):
self.addchar = lambda y, x, *args: self.win.addch(x, y, *args)
else:
self.addchar = self.win.addch | python | def patchCurses(self):
"""
Fix curses addch function for python 3.4.0
"""
if (sys.version_info)[:3] == (3, 4, 0):
self.addchar = lambda y, x, *args: self.win.addch(x, y, *args)
else:
self.addchar = self.win.addch | [
"def",
"patchCurses",
"(",
"self",
")",
":",
"if",
"(",
"sys",
".",
"version_info",
")",
"[",
":",
"3",
"]",
"==",
"(",
"3",
",",
"4",
",",
"0",
")",
":",
"self",
".",
"addchar",
"=",
"lambda",
"y",
",",
"x",
",",
"*",
"args",
":",
"self",
".",
"win",
".",
"addch",
"(",
"x",
",",
"y",
",",
"*",
"args",
")",
"else",
":",
"self",
".",
"addchar",
"=",
"self",
".",
"win",
".",
"addch"
] | Fix curses addch function for python 3.4.0 | [
"Fix",
"curses",
"addch",
"function",
"for",
"python",
"3",
".",
"4",
".",
"0"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L98-L105 | train |
iiSeymour/game-of-life | conway/gol.py | gol.splash | def splash(self):
"""
Draw splash screen
"""
dirname = os.path.split(os.path.abspath(__file__))[0]
try:
splash = open(os.path.join(dirname, "splash"), "r").readlines()
except IOError:
return
width = len(max(splash, key=len))
y = int(self.y_grid / 2) - len(splash)
x = int(self.x_grid / 2) - int(width / 2)
if self.x_grid > width:
for i, line in enumerate(splash):
self.win.addstr(y + i, x, line, curses.color_pair(5)) | python | def splash(self):
"""
Draw splash screen
"""
dirname = os.path.split(os.path.abspath(__file__))[0]
try:
splash = open(os.path.join(dirname, "splash"), "r").readlines()
except IOError:
return
width = len(max(splash, key=len))
y = int(self.y_grid / 2) - len(splash)
x = int(self.x_grid / 2) - int(width / 2)
if self.x_grid > width:
for i, line in enumerate(splash):
self.win.addstr(y + i, x, line, curses.color_pair(5)) | [
"def",
"splash",
"(",
"self",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"[",
"0",
"]",
"try",
":",
"splash",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"\"splash\"",
")",
",",
"\"r\"",
")",
".",
"readlines",
"(",
")",
"except",
"IOError",
":",
"return",
"width",
"=",
"len",
"(",
"max",
"(",
"splash",
",",
"key",
"=",
"len",
")",
")",
"y",
"=",
"int",
"(",
"self",
".",
"y_grid",
"/",
"2",
")",
"-",
"len",
"(",
"splash",
")",
"x",
"=",
"int",
"(",
"self",
".",
"x_grid",
"/",
"2",
")",
"-",
"int",
"(",
"width",
"/",
"2",
")",
"if",
"self",
".",
"x_grid",
">",
"width",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"splash",
")",
":",
"self",
".",
"win",
".",
"addstr",
"(",
"y",
"+",
"i",
",",
"x",
",",
"line",
",",
"curses",
".",
"color_pair",
"(",
"5",
")",
")"
] | Draw splash screen | [
"Draw",
"splash",
"screen"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L110-L126 | train |
iiSeymour/game-of-life | conway/gol.py | gol.drawHUD | def drawHUD(self):
"""
Draw information on population size and current generation
"""
self.win.move(self.height - 2, self.x_pad)
self.win.clrtoeol()
self.win.box()
self.addstr(2, self.x_pad + 1, "Population: %i" % len(self.grid))
self.addstr(3, self.x_pad + 1, "Generation: %s" % self.current_gen)
self.addstr(3, self.x_grid - 21, "s: start p: pause")
self.addstr(2, self.x_grid - 21, "r: restart q: quit") | python | def drawHUD(self):
"""
Draw information on population size and current generation
"""
self.win.move(self.height - 2, self.x_pad)
self.win.clrtoeol()
self.win.box()
self.addstr(2, self.x_pad + 1, "Population: %i" % len(self.grid))
self.addstr(3, self.x_pad + 1, "Generation: %s" % self.current_gen)
self.addstr(3, self.x_grid - 21, "s: start p: pause")
self.addstr(2, self.x_grid - 21, "r: restart q: quit") | [
"def",
"drawHUD",
"(",
"self",
")",
":",
"self",
".",
"win",
".",
"move",
"(",
"self",
".",
"height",
"-",
"2",
",",
"self",
".",
"x_pad",
")",
"self",
".",
"win",
".",
"clrtoeol",
"(",
")",
"self",
".",
"win",
".",
"box",
"(",
")",
"self",
".",
"addstr",
"(",
"2",
",",
"self",
".",
"x_pad",
"+",
"1",
",",
"\"Population: %i\"",
"%",
"len",
"(",
"self",
".",
"grid",
")",
")",
"self",
".",
"addstr",
"(",
"3",
",",
"self",
".",
"x_pad",
"+",
"1",
",",
"\"Generation: %s\"",
"%",
"self",
".",
"current_gen",
")",
"self",
".",
"addstr",
"(",
"3",
",",
"self",
".",
"x_grid",
"-",
"21",
",",
"\"s: start p: pause\"",
")",
"self",
".",
"addstr",
"(",
"2",
",",
"self",
".",
"x_grid",
"-",
"21",
",",
"\"r: restart q: quit\"",
")"
] | Draw information on population size and current generation | [
"Draw",
"information",
"on",
"population",
"size",
"and",
"current",
"generation"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L128-L138 | train |
iiSeymour/game-of-life | conway/gol.py | gol.drawGrid | def drawGrid(self):
"""
Redraw the grid with the new generation
"""
for cell in self.grid:
y, x = cell
y += self.y_pad
x += self.x_pad
if self.traditional:
sprite = '.'
color = curses.color_pair(4)
else:
sprite = self.char[self.grid[cell] - 1]
color = curses.color_pair(self.grid[cell])
self.addchar(y, x, sprite, color)
self.win.refresh() | python | def drawGrid(self):
"""
Redraw the grid with the new generation
"""
for cell in self.grid:
y, x = cell
y += self.y_pad
x += self.x_pad
if self.traditional:
sprite = '.'
color = curses.color_pair(4)
else:
sprite = self.char[self.grid[cell] - 1]
color = curses.color_pair(self.grid[cell])
self.addchar(y, x, sprite, color)
self.win.refresh() | [
"def",
"drawGrid",
"(",
"self",
")",
":",
"for",
"cell",
"in",
"self",
".",
"grid",
":",
"y",
",",
"x",
"=",
"cell",
"y",
"+=",
"self",
".",
"y_pad",
"x",
"+=",
"self",
".",
"x_pad",
"if",
"self",
".",
"traditional",
":",
"sprite",
"=",
"'.'",
"color",
"=",
"curses",
".",
"color_pair",
"(",
"4",
")",
"else",
":",
"sprite",
"=",
"self",
".",
"char",
"[",
"self",
".",
"grid",
"[",
"cell",
"]",
"-",
"1",
"]",
"color",
"=",
"curses",
".",
"color_pair",
"(",
"self",
".",
"grid",
"[",
"cell",
"]",
")",
"self",
".",
"addchar",
"(",
"y",
",",
"x",
",",
"sprite",
",",
"color",
")",
"self",
".",
"win",
".",
"refresh",
"(",
")"
] | Redraw the grid with the new generation | [
"Redraw",
"the",
"grid",
"with",
"the",
"new",
"generation"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L140-L158 | train |
iiSeymour/game-of-life | conway/gol.py | gol.nextGen | def nextGen(self):
"""
Decide the fate of the cells
"""
self.current_gen += 1
self.change_gen[self.current_gen % 3] = copy.copy(self.grid)
grid_cp = copy.copy(self.grid)
for cell in self.grid:
y, x = cell
y1 = (y - 1) % self.y_grid
y2 = (y + 1) % self.y_grid
x1 = (x - 1) % self.x_grid
x2 = (x + 1) % self.x_grid
n = self.countNeighbours(cell)
if n < 2 or n > 3:
del grid_cp[cell]
self.addchar(y + self.y_pad, x + self.x_pad, ' ')
else:
grid_cp[cell] = min(self.grid[cell] + 1, self.color_max)
for neighbour in product([y1, y, y2], [x1, x, x2]):
if not self.grid.get(neighbour):
if self.countNeighbours(neighbour) == 3:
y, x = neighbour
y = y % self.y_grid
x = x % self.x_grid
neighbour = y, x
grid_cp[neighbour] = 1
self.grid = grid_cp | python | def nextGen(self):
"""
Decide the fate of the cells
"""
self.current_gen += 1
self.change_gen[self.current_gen % 3] = copy.copy(self.grid)
grid_cp = copy.copy(self.grid)
for cell in self.grid:
y, x = cell
y1 = (y - 1) % self.y_grid
y2 = (y + 1) % self.y_grid
x1 = (x - 1) % self.x_grid
x2 = (x + 1) % self.x_grid
n = self.countNeighbours(cell)
if n < 2 or n > 3:
del grid_cp[cell]
self.addchar(y + self.y_pad, x + self.x_pad, ' ')
else:
grid_cp[cell] = min(self.grid[cell] + 1, self.color_max)
for neighbour in product([y1, y, y2], [x1, x, x2]):
if not self.grid.get(neighbour):
if self.countNeighbours(neighbour) == 3:
y, x = neighbour
y = y % self.y_grid
x = x % self.x_grid
neighbour = y, x
grid_cp[neighbour] = 1
self.grid = grid_cp | [
"def",
"nextGen",
"(",
"self",
")",
":",
"self",
".",
"current_gen",
"+=",
"1",
"self",
".",
"change_gen",
"[",
"self",
".",
"current_gen",
"%",
"3",
"]",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"grid",
")",
"grid_cp",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"grid",
")",
"for",
"cell",
"in",
"self",
".",
"grid",
":",
"y",
",",
"x",
"=",
"cell",
"y1",
"=",
"(",
"y",
"-",
"1",
")",
"%",
"self",
".",
"y_grid",
"y2",
"=",
"(",
"y",
"+",
"1",
")",
"%",
"self",
".",
"y_grid",
"x1",
"=",
"(",
"x",
"-",
"1",
")",
"%",
"self",
".",
"x_grid",
"x2",
"=",
"(",
"x",
"+",
"1",
")",
"%",
"self",
".",
"x_grid",
"n",
"=",
"self",
".",
"countNeighbours",
"(",
"cell",
")",
"if",
"n",
"<",
"2",
"or",
"n",
">",
"3",
":",
"del",
"grid_cp",
"[",
"cell",
"]",
"self",
".",
"addchar",
"(",
"y",
"+",
"self",
".",
"y_pad",
",",
"x",
"+",
"self",
".",
"x_pad",
",",
"' '",
")",
"else",
":",
"grid_cp",
"[",
"cell",
"]",
"=",
"min",
"(",
"self",
".",
"grid",
"[",
"cell",
"]",
"+",
"1",
",",
"self",
".",
"color_max",
")",
"for",
"neighbour",
"in",
"product",
"(",
"[",
"y1",
",",
"y",
",",
"y2",
"]",
",",
"[",
"x1",
",",
"x",
",",
"x2",
"]",
")",
":",
"if",
"not",
"self",
".",
"grid",
".",
"get",
"(",
"neighbour",
")",
":",
"if",
"self",
".",
"countNeighbours",
"(",
"neighbour",
")",
"==",
"3",
":",
"y",
",",
"x",
"=",
"neighbour",
"y",
"=",
"y",
"%",
"self",
".",
"y_grid",
"x",
"=",
"x",
"%",
"self",
".",
"x_grid",
"neighbour",
"=",
"y",
",",
"x",
"grid_cp",
"[",
"neighbour",
"]",
"=",
"1",
"self",
".",
"grid",
"=",
"grid_cp"
] | Decide the fate of the cells | [
"Decide",
"the",
"fate",
"of",
"the",
"cells"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L160-L191 | train |
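
nextGen applies the usual Conway rules to a sparse dict of live cells with wrap-around indexing; the survival/birth rule itself, separated from the curses drawing, is tiny:

def fate(alive, neighbours):
    # Live cells survive with 2 or 3 neighbours; dead cells are born with exactly 3.
    if alive:
        return 2 <= neighbours <= 3
    return neighbours == 3


print(fate(True, 1), fate(True, 3), fate(False, 3))  # False True True
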
iiSeymour/game-of-life | conway/gol.py | gol.countNeighbours | def countNeighbours(self, cell):
"""
Return the number of active neighbours within one position away from cell
"""
count = 0
y, x = cell
y = y % self.y_grid
x = x % self.x_grid
y1 = (y - 1) % self.y_grid
y2 = (y + 1) % self.y_grid
x1 = (x - 1) % self.x_grid
x2 = (x + 1) % self.x_grid
cell = y, x
for neighbour in product([y1, y, y2], [x1, x, x2]):
if neighbour != cell and self.grid.get(neighbour):
count += 1
return count | python | def countNeighbours(self, cell):
"""
Return the number of active neighbours within one position away from cell
"""
count = 0
y, x = cell
y = y % self.y_grid
x = x % self.x_grid
y1 = (y - 1) % self.y_grid
y2 = (y + 1) % self.y_grid
x1 = (x - 1) % self.x_grid
x2 = (x + 1) % self.x_grid
cell = y, x
for neighbour in product([y1, y, y2], [x1, x, x2]):
if neighbour != cell and self.grid.get(neighbour):
count += 1
return count | [
"def",
"countNeighbours",
"(",
"self",
",",
"cell",
")",
":",
"count",
"=",
"0",
"y",
",",
"x",
"=",
"cell",
"y",
"=",
"y",
"%",
"self",
".",
"y_grid",
"x",
"=",
"x",
"%",
"self",
".",
"x_grid",
"y1",
"=",
"(",
"y",
"-",
"1",
")",
"%",
"self",
".",
"y_grid",
"y2",
"=",
"(",
"y",
"+",
"1",
")",
"%",
"self",
".",
"y_grid",
"x1",
"=",
"(",
"x",
"-",
"1",
")",
"%",
"self",
".",
"x_grid",
"x2",
"=",
"(",
"x",
"+",
"1",
")",
"%",
"self",
".",
"x_grid",
"cell",
"=",
"y",
",",
"x",
"for",
"neighbour",
"in",
"product",
"(",
"[",
"y1",
",",
"y",
",",
"y2",
"]",
",",
"[",
"x1",
",",
"x",
",",
"x2",
"]",
")",
":",
"if",
"neighbour",
"!=",
"cell",
"and",
"self",
".",
"grid",
".",
"get",
"(",
"neighbour",
")",
":",
"count",
"+=",
"1",
"return",
"count"
] | Return the number of active neighbours within one position away from cell | [
"Return",
"the",
"number",
"active",
"neighbours",
"within",
"one",
"positions",
"away",
"from",
"cell"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L193-L210 | train |
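
A standalone check of the toroidal neighbour counting used above, on a tiny 3x3 grid where one of the live cells is only adjacent through wrap-around (the grid contents are made up):

from itertools import product


def count_neighbours(grid, cell, y_max, x_max):
    # Same modular-arithmetic neighbourhood as gol.countNeighbours.
    y, x = cell[0] % y_max, cell[1] % x_max
    ys = [(y - 1) % y_max, y, (y + 1) % y_max]
    xs = [(x - 1) % x_max, x, (x + 1) % x_max]
    return sum(1 for n in product(ys, xs) if n != (y, x) and grid.get(n))


grid = {(0, 0): 1, (0, 2): 1, (2, 1): 1}
print(count_neighbours(grid, (0, 1), 3, 3))  # 3, including the wrapped cell at (2, 1)
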
iiSeymour/game-of-life | conway/gol.py | gol.initGrid | def initGrid(self):
"""
Initialise the game grid
"""
blinker = [(4, 4), (4, 5), (4, 6)]
toad = [(9, 5), (9, 6), (9, 7), (10, 4), (10, 5), (10, 6)]
glider = [(4, 11), (5, 12), (6, 10), (6, 11), (6, 12)]
r_pentomino = [(10, 60), (9, 61), (10, 61), (11, 61), (9, 62)]
self.grid = {}
if self.test:
for cell in chain(blinker, toad, glider, r_pentomino):
self.grid[cell] = 1
else:
for _ in range(self.initsize):
ry = random.randint(self.y_pad, self.y_grid - 1)
rx = random.randint(self.x_pad, self.x_grid - 1)
self.grid[(ry, rx)] = 1 | python | def initGrid(self):
"""
Initialise the game grid
"""
blinker = [(4, 4), (4, 5), (4, 6)]
toad = [(9, 5), (9, 6), (9, 7), (10, 4), (10, 5), (10, 6)]
glider = [(4, 11), (5, 12), (6, 10), (6, 11), (6, 12)]
r_pentomino = [(10, 60), (9, 61), (10, 61), (11, 61), (9, 62)]
self.grid = {}
if self.test:
for cell in chain(blinker, toad, glider, r_pentomino):
self.grid[cell] = 1
else:
for _ in range(self.initsize):
ry = random.randint(self.y_pad, self.y_grid - 1)
rx = random.randint(self.x_pad, self.x_grid - 1)
self.grid[(ry, rx)] = 1 | [
"def",
"initGrid",
"(",
"self",
")",
":",
"blinker",
"=",
"[",
"(",
"4",
",",
"4",
")",
",",
"(",
"4",
",",
"5",
")",
",",
"(",
"4",
",",
"6",
")",
"]",
"toad",
"=",
"[",
"(",
"9",
",",
"5",
")",
",",
"(",
"9",
",",
"6",
")",
",",
"(",
"9",
",",
"7",
")",
",",
"(",
"10",
",",
"4",
")",
",",
"(",
"10",
",",
"5",
")",
",",
"(",
"10",
",",
"6",
")",
"]",
"glider",
"=",
"[",
"(",
"4",
",",
"11",
")",
",",
"(",
"5",
",",
"12",
")",
",",
"(",
"6",
",",
"10",
")",
",",
"(",
"6",
",",
"11",
")",
",",
"(",
"6",
",",
"12",
")",
"]",
"r_pentomino",
"=",
"[",
"(",
"10",
",",
"60",
")",
",",
"(",
"9",
",",
"61",
")",
",",
"(",
"10",
",",
"61",
")",
",",
"(",
"11",
",",
"61",
")",
",",
"(",
"9",
",",
"62",
")",
"]",
"self",
".",
"grid",
"=",
"{",
"}",
"if",
"self",
".",
"test",
":",
"for",
"cell",
"in",
"chain",
"(",
"blinker",
",",
"toad",
",",
"glider",
",",
"r_pentomino",
")",
":",
"self",
".",
"grid",
"[",
"cell",
"]",
"=",
"1",
"else",
":",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"initsize",
")",
":",
"ry",
"=",
"random",
".",
"randint",
"(",
"self",
".",
"y_pad",
",",
"self",
".",
"y_grid",
"-",
"1",
")",
"rx",
"=",
"random",
".",
"randint",
"(",
"self",
".",
"x_pad",
",",
"self",
".",
"x_grid",
"-",
"1",
")",
"self",
".",
"grid",
"[",
"(",
"ry",
",",
"rx",
")",
"]",
"=",
"1"
] | Initialise the game grid | [
"Initialise",
"the",
"game",
"grid"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L212-L230 | train |
iiSeymour/game-of-life | conway/gol.py | gol.restart | def restart(self):
"""
Restart the game from a new generation 0
"""
self.initGrid()
self.win.clear()
self.current_gen = 1
self.start | python | def restart(self):
"""
Restart the game from a new generation 0
"""
self.initGrid()
self.win.clear()
self.current_gen = 1
self.start | [
"def",
"restart",
"(",
"self",
")",
":",
"self",
".",
"initGrid",
"(",
")",
"self",
".",
"win",
".",
"clear",
"(",
")",
"self",
".",
"current_gen",
"=",
"1",
"self",
".",
"start"
] | Restart the game from a new generation 0 | [
"Restart",
"the",
"game",
"from",
"a",
"new",
"generation",
"0"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L278-L285 | train |
iiSeymour/game-of-life | conway/gol.py | gol.end | def end(self):
"""
Game Finished - Restart or Quit
"""
if self.loop:
self.restart
return
self.addstr(2, self.x_grid / 2 - 4, "GAMEOVER", 7)
if self.hud:
self.addstr(2, self.x_pad + 13, len(self.grid), 5)
self.addstr(3, self.x_pad + 13, self.current_gen, 5)
if self.test:
exit()
while self.state == 'stopped':
key = self.win.getch()
if key == ord('q'):
exit()
if key in [ord('s'), ord('r')]:
self.restart | python | def end(self):
"""
Game Finished - Restart or Quit
"""
if self.loop:
self.restart
return
self.addstr(2, self.x_grid / 2 - 4, "GAMEOVER", 7)
if self.hud:
self.addstr(2, self.x_pad + 13, len(self.grid), 5)
self.addstr(3, self.x_pad + 13, self.current_gen, 5)
if self.test:
exit()
while self.state == 'stopped':
key = self.win.getch()
if key == ord('q'):
exit()
if key in [ord('s'), ord('r')]:
self.restart | [
"def",
"end",
"(",
"self",
")",
":",
"if",
"self",
".",
"loop",
":",
"self",
".",
"restart",
"return",
"self",
".",
"addstr",
"(",
"2",
",",
"self",
".",
"x_grid",
"/",
"2",
"-",
"4",
",",
"\"GAMEOVER\"",
",",
"7",
")",
"if",
"self",
".",
"hud",
":",
"self",
".",
"addstr",
"(",
"2",
",",
"self",
".",
"x_pad",
"+",
"13",
",",
"len",
"(",
"self",
".",
"grid",
")",
",",
"5",
")",
"self",
".",
"addstr",
"(",
"3",
",",
"self",
".",
"x_pad",
"+",
"13",
",",
"self",
".",
"current_gen",
",",
"5",
")",
"if",
"self",
".",
"test",
":",
"exit",
"(",
")",
"while",
"self",
".",
"state",
"==",
"'stopped'",
":",
"key",
"=",
"self",
".",
"win",
".",
"getch",
"(",
")",
"if",
"key",
"==",
"ord",
"(",
"'q'",
")",
":",
"exit",
"(",
")",
"if",
"key",
"in",
"[",
"ord",
"(",
"'s'",
")",
",",
"ord",
"(",
"'r'",
")",
"]",
":",
"self",
".",
"restart"
] | Game Finished - Restart or Quit | [
"Game",
"Finished",
"-",
"Restart",
"or",
"Quit"
] | 288bc87179ffd986ca066bcd98ea6e0951dd7970 | https://github.com/iiSeymour/game-of-life/blob/288bc87179ffd986ca066bcd98ea6e0951dd7970/conway/gol.py#L288-L309 | train |
guaix-ucm/numina | numina/array/combine.py | mean | def mean(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the mean, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: mean, variance of the mean and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> mean(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]])
"""
return generic_combine(intl_combine.mean_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales,
weights=weights) | python | def mean(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the mean, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: mean, variance of the mean and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> mean(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]])
"""
return generic_combine(intl_combine.mean_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales,
weights=weights) | [
"def",
"mean",
"(",
"arrays",
",",
"masks",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"out",
"=",
"None",
",",
"zeros",
"=",
"None",
",",
"scales",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"return",
"generic_combine",
"(",
"intl_combine",
".",
"mean_method",
"(",
")",
",",
"arrays",
",",
"masks",
"=",
"masks",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
",",
"zeros",
"=",
"zeros",
",",
"scales",
"=",
"scales",
",",
"weights",
"=",
"weights",
")"
] | Combine arrays using the mean, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: mean, variance of the mean and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> mean(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]]) | [
"Combine",
"arrays",
"using",
"the",
"mean",
"with",
"masks",
"and",
"offsets",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/combine.py#L22-L59 | train |
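The mean record above documents masks, zeros, scales and weights, but its doctest never exercises them. A minimal sketch of masked combination follows; the frame values and the single flagged pixel are illustrative assumptions, not taken from the numina repository.

import numpy
from numina.array.combine import mean

# Two 2x2 frames; True entries in a mask flag pixels to ignore.
frames = [numpy.ones((2, 2)), 3.0 * numpy.ones((2, 2))]
masks = [numpy.zeros((2, 2), dtype=bool), numpy.zeros((2, 2), dtype=bool)]
masks[1][0, 0] = True  # hypothetical bad pixel in the second frame

result = mean(frames, masks=masks)
combined = result[0]   # per-pixel mean of the unmasked values
variance = result[1]   # variance of the mean
npoints = result[2]    # number of values that went into each pixel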
guaix-ucm/numina | numina/array/combine.py | median | def median(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the median, with masks.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the median,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: median, variance of the median and number of points stored
"""
return generic_combine(intl_combine.median_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights) | python | def median(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the median, with masks.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the median,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: median, variance of the median and number of points stored
"""
return generic_combine(intl_combine.median_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights) | [
"def",
"median",
"(",
"arrays",
",",
"masks",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"out",
"=",
"None",
",",
"zeros",
"=",
"None",
",",
"scales",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"return",
"generic_combine",
"(",
"intl_combine",
".",
"median_method",
"(",
")",
",",
"arrays",
",",
"masks",
"=",
"masks",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
",",
"zeros",
"=",
"zeros",
",",
"scales",
"=",
"scales",
",",
"weights",
"=",
"weights",
")"
] | Combine arrays using the median, with masks.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the median,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: median, variance of the median and number of points stored | [
"Combine",
"arrays",
"using",
"the",
"median",
"with",
"masks",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/combine.py#L62-L85 | train |
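Unlike the mean record, the median record carries no example. The sketch below assumes the calling convention described in the docstring; the three input frames are made-up values chosen to show robustness to a single outlier.

import numpy
from numina.array.combine import median

frames = [numpy.array([[1.0, 2.0], [3.0, 4.0]]),
          numpy.array([[2.0, 3.0], [4.0, 5.0]]),
          numpy.array([[9.0, 4.0], [5.0, 6.0]])]

result = median(frames)
combined = result[0]   # per-pixel median; the outlier 9.0 does not pull it up
variance = result[1]   # variance estimate of the median
npoints = result[2]    # number of points used per pixel (3 here, no masks given)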