repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 distinct value) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 distinct value)
---|---|---|---|---|---|---|---|---|---|---|---
PyAr/fades
|
fades/envbuilder.py
|
_FadesEnvBuilder.create_env
|
def create_env(self, interpreter, is_current, options):
"""Create the virtualenv and return its info."""
if is_current:
# apply pyvenv options
pyvenv_options = options['pyvenv_options']
if "--system-site-packages" in pyvenv_options:
self.system_site_packages = True
logger.debug("Creating virtualenv with pyvenv. options=%s", pyvenv_options)
self.create(self.env_path)
else:
virtualenv_options = options['virtualenv_options']
logger.debug("Creating virtualenv with virtualenv")
self.create_with_virtualenv(interpreter, virtualenv_options)
logger.debug("env_bin_path: %s", self.env_bin_path)
# Re check if pip was installed (supporting both binary and .exe for Windows)
pip_bin = os.path.join(self.env_bin_path, "pip")
pip_exe = os.path.join(self.env_bin_path, "pip.exe")
if not (os.path.exists(pip_bin) or os.path.exists(pip_exe)):
logger.debug("pip isn't installed in the venv, setting pip_installed=False")
self.pip_installed = False
return self.env_path, self.env_bin_path, self.pip_installed
|
python
|
def create_env(self, interpreter, is_current, options):
"""Create the virtualenv and return its info."""
if is_current:
# apply pyvenv options
pyvenv_options = options['pyvenv_options']
if "--system-site-packages" in pyvenv_options:
self.system_site_packages = True
logger.debug("Creating virtualenv with pyvenv. options=%s", pyvenv_options)
self.create(self.env_path)
else:
virtualenv_options = options['virtualenv_options']
logger.debug("Creating virtualenv with virtualenv")
self.create_with_virtualenv(interpreter, virtualenv_options)
logger.debug("env_bin_path: %s", self.env_bin_path)
# Re check if pip was installed (supporting both binary and .exe for Windows)
pip_bin = os.path.join(self.env_bin_path, "pip")
pip_exe = os.path.join(self.env_bin_path, "pip.exe")
if not (os.path.exists(pip_bin) or os.path.exists(pip_exe)):
logger.debug("pip isn't installed in the venv, setting pip_installed=False")
self.pip_installed = False
return self.env_path, self.env_bin_path, self.pip_installed
|
[
"def",
"create_env",
"(",
"self",
",",
"interpreter",
",",
"is_current",
",",
"options",
")",
":",
"if",
"is_current",
":",
"# apply pyvenv options",
"pyvenv_options",
"=",
"options",
"[",
"'pyvenv_options'",
"]",
"if",
"\"--system-site-packages\"",
"in",
"pyvenv_options",
":",
"self",
".",
"system_site_packages",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"Creating virtualenv with pyvenv. options=%s\"",
",",
"pyvenv_options",
")",
"self",
".",
"create",
"(",
"self",
".",
"env_path",
")",
"else",
":",
"virtualenv_options",
"=",
"options",
"[",
"'virtualenv_options'",
"]",
"logger",
".",
"debug",
"(",
"\"Creating virtualenv with virtualenv\"",
")",
"self",
".",
"create_with_virtualenv",
"(",
"interpreter",
",",
"virtualenv_options",
")",
"logger",
".",
"debug",
"(",
"\"env_bin_path: %s\"",
",",
"self",
".",
"env_bin_path",
")",
"# Re check if pip was installed (supporting both binary and .exe for Windows)",
"pip_bin",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"env_bin_path",
",",
"\"pip\"",
")",
"pip_exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"env_bin_path",
",",
"\"pip.exe\"",
")",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"pip_bin",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"pip_exe",
")",
")",
":",
"logger",
".",
"debug",
"(",
"\"pip isn't installed in the venv, setting pip_installed=False\"",
")",
"self",
".",
"pip_installed",
"=",
"False",
"return",
"self",
".",
"env_path",
",",
"self",
".",
"env_bin_path",
",",
"self",
".",
"pip_installed"
] |
Create the virtualenv and return its info.
|
[
"Create",
"the",
"virtualenv",
"and",
"return",
"its",
"info",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/envbuilder.py#L95-L117
|
train
|
PyAr/fades
|
fades/envbuilder.py
|
UsageManager.store_usage_stat
|
def store_usage_stat(self, venv_data, cache):
"""Log an usage record for venv_data."""
with open(self.stat_file_path, 'at') as f:
self._write_venv_usage(f, venv_data)
|
python
|
def store_usage_stat(self, venv_data, cache):
"""Log an usage record for venv_data."""
with open(self.stat_file_path, 'at') as f:
self._write_venv_usage(f, venv_data)
|
[
"def",
"store_usage_stat",
"(",
"self",
",",
"venv_data",
",",
"cache",
")",
":",
"with",
"open",
"(",
"self",
".",
"stat_file_path",
",",
"'at'",
")",
"as",
"f",
":",
"self",
".",
"_write_venv_usage",
"(",
"f",
",",
"venv_data",
")"
] |
Log an usage record for venv_data.
|
[
"Log",
"an",
"usage",
"record",
"for",
"venv_data",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/envbuilder.py#L192-L195
|
train
|
PyAr/fades
|
fades/envbuilder.py
|
UsageManager.clean_unused_venvs
|
def clean_unused_venvs(self, max_days_to_keep):
"""Compact usage stats and remove venvs.
This method loads the complete file usage in memory, for every venv compact all records in
one (the lastest), updates this info for every env deleted and, finally, write the entire
file to disk.
If something failed during this steps, usage file remains unchanged and can contain some
data about some deleted env. This is not a problem, the next time this function it's
called, this records will be deleted.
"""
with filelock(self.stat_file_lock):
now = datetime.utcnow()
venvs_dict = self._get_compacted_dict_usage_from_file()
for venv_uuid, usage_date in venvs_dict.copy().items():
usage_date = self._str_to_datetime(usage_date)
if (now - usage_date).days > max_days_to_keep:
# remove venv from usage dict
del venvs_dict[venv_uuid]
venv_meta = self.venvscache.get_venv(uuid=venv_uuid)
if venv_meta is None:
# if meta isn't found means that something had failed previously and
# usage_file wasn't updated.
continue
env_path = venv_meta['env_path']
logger.info("Destroying virtualenv at: %s", env_path) # #256
destroy_venv(env_path, self.venvscache)
self._write_compacted_dict_usage_to_file(venvs_dict)
|
python
|
def clean_unused_venvs(self, max_days_to_keep):
"""Compact usage stats and remove venvs.
This method loads the complete file usage in memory, for every venv compact all records in
one (the lastest), updates this info for every env deleted and, finally, write the entire
file to disk.
If something failed during this steps, usage file remains unchanged and can contain some
data about some deleted env. This is not a problem, the next time this function it's
called, this records will be deleted.
"""
with filelock(self.stat_file_lock):
now = datetime.utcnow()
venvs_dict = self._get_compacted_dict_usage_from_file()
for venv_uuid, usage_date in venvs_dict.copy().items():
usage_date = self._str_to_datetime(usage_date)
if (now - usage_date).days > max_days_to_keep:
# remove venv from usage dict
del venvs_dict[venv_uuid]
venv_meta = self.venvscache.get_venv(uuid=venv_uuid)
if venv_meta is None:
# if meta isn't found means that something had failed previously and
# usage_file wasn't updated.
continue
env_path = venv_meta['env_path']
logger.info("Destroying virtualenv at: %s", env_path) # #256
destroy_venv(env_path, self.venvscache)
self._write_compacted_dict_usage_to_file(venvs_dict)
|
[
"def",
"clean_unused_venvs",
"(",
"self",
",",
"max_days_to_keep",
")",
":",
"with",
"filelock",
"(",
"self",
".",
"stat_file_lock",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"venvs_dict",
"=",
"self",
".",
"_get_compacted_dict_usage_from_file",
"(",
")",
"for",
"venv_uuid",
",",
"usage_date",
"in",
"venvs_dict",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
":",
"usage_date",
"=",
"self",
".",
"_str_to_datetime",
"(",
"usage_date",
")",
"if",
"(",
"now",
"-",
"usage_date",
")",
".",
"days",
">",
"max_days_to_keep",
":",
"# remove venv from usage dict",
"del",
"venvs_dict",
"[",
"venv_uuid",
"]",
"venv_meta",
"=",
"self",
".",
"venvscache",
".",
"get_venv",
"(",
"uuid",
"=",
"venv_uuid",
")",
"if",
"venv_meta",
"is",
"None",
":",
"# if meta isn't found means that something had failed previously and",
"# usage_file wasn't updated.",
"continue",
"env_path",
"=",
"venv_meta",
"[",
"'env_path'",
"]",
"logger",
".",
"info",
"(",
"\"Destroying virtualenv at: %s\"",
",",
"env_path",
")",
"# #256",
"destroy_venv",
"(",
"env_path",
",",
"self",
".",
"venvscache",
")",
"self",
".",
"_write_compacted_dict_usage_to_file",
"(",
"venvs_dict",
")"
] |
Compact usage stats and remove venvs.
This method loads the complete file usage in memory, for every venv compact all records in
one (the lastest), updates this info for every env deleted and, finally, write the entire
file to disk.
If something failed during this steps, usage file remains unchanged and can contain some
data about some deleted env. This is not a problem, the next time this function it's
called, this records will be deleted.
|
[
"Compact",
"usage",
"stats",
"and",
"remove",
"venvs",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/envbuilder.py#L214-L242
|
train
|
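The age check in `clean_unused_venvs` above relies on `timedelta.days`; a minimal standalone sketch of that comparison (the threshold and the 120-day offset are illustrative values, not fades defaults):

```python
from datetime import datetime, timedelta

max_days_to_keep = 90  # illustrative threshold
usage_date = datetime.utcnow() - timedelta(days=120)  # pretend last-use timestamp

# same expression used in clean_unused_venvs: whole days elapsed since last use
if (datetime.utcnow() - usage_date).days > max_days_to_keep:
    print("venv would be removed")
```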
PyAr/fades
|
fades/helpers.py
|
logged_exec
|
def logged_exec(cmd):
"""Execute a command, redirecting the output to the log."""
logger = logging.getLogger('fades.exec')
logger.debug("Executing external command: %r", cmd)
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
stdout = []
for line in p.stdout:
line = line[:-1]
stdout.append(line)
logger.debug(STDOUT_LOG_PREFIX + line)
retcode = p.wait()
if retcode:
raise ExecutionError(retcode, cmd, stdout)
return stdout
|
python
|
def logged_exec(cmd):
"""Execute a command, redirecting the output to the log."""
logger = logging.getLogger('fades.exec')
logger.debug("Executing external command: %r", cmd)
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
stdout = []
for line in p.stdout:
line = line[:-1]
stdout.append(line)
logger.debug(STDOUT_LOG_PREFIX + line)
retcode = p.wait()
if retcode:
raise ExecutionError(retcode, cmd, stdout)
return stdout
|
[
"def",
"logged_exec",
"(",
"cmd",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'fades.exec'",
")",
"logger",
".",
"debug",
"(",
"\"Executing external command: %r\"",
",",
"cmd",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"universal_newlines",
"=",
"True",
")",
"stdout",
"=",
"[",
"]",
"for",
"line",
"in",
"p",
".",
"stdout",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"stdout",
".",
"append",
"(",
"line",
")",
"logger",
".",
"debug",
"(",
"STDOUT_LOG_PREFIX",
"+",
"line",
")",
"retcode",
"=",
"p",
".",
"wait",
"(",
")",
"if",
"retcode",
":",
"raise",
"ExecutionError",
"(",
"retcode",
",",
"cmd",
",",
"stdout",
")",
"return",
"stdout"
] |
Execute a command, redirecting the output to the log.
|
[
"Execute",
"a",
"command",
"redirecting",
"the",
"output",
"to",
"the",
"log",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L72-L86
|
train
|
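A minimal usage sketch for `logged_exec` as defined above, assuming the fades package is importable; the command is only an example:

```python
import logging

from fades.helpers import logged_exec, ExecutionError  # assumes fades is installed

try:
    # any argv-style list works; output lines come back with newlines stripped
    output_lines = logged_exec(["python3", "--version"])
except ExecutionError as error:
    # on a non-zero return code, the collected output can be dumped to a logger
    error.dump_to_log(logging.getLogger("fades.exec"))
else:
    print(output_lines)
```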
PyAr/fades
|
fades/helpers.py
|
_get_specific_dir
|
def _get_specific_dir(dir_type):
"""Get a specific directory, using some XDG base, with sensible default."""
if SNAP_BASEDIR_NAME in os.environ:
logger.debug("Getting base dir information from SNAP_BASEDIR_NAME env var.")
direct = os.path.join(os.environ[SNAP_BASEDIR_NAME], dir_type)
else:
try:
basedirectory = _get_basedirectory()
except ImportError:
logger.debug("Using last resort base dir: ~/.fades")
from os.path import expanduser
direct = os.path.join(expanduser("~"), ".fades")
else:
xdg_attrib = 'xdg_{}_home'.format(dir_type)
base = getattr(basedirectory, xdg_attrib)
direct = os.path.join(base, 'fades')
if not os.path.exists(direct):
os.makedirs(direct)
return direct
|
python
|
def _get_specific_dir(dir_type):
"""Get a specific directory, using some XDG base, with sensible default."""
if SNAP_BASEDIR_NAME in os.environ:
logger.debug("Getting base dir information from SNAP_BASEDIR_NAME env var.")
direct = os.path.join(os.environ[SNAP_BASEDIR_NAME], dir_type)
else:
try:
basedirectory = _get_basedirectory()
except ImportError:
logger.debug("Using last resort base dir: ~/.fades")
from os.path import expanduser
direct = os.path.join(expanduser("~"), ".fades")
else:
xdg_attrib = 'xdg_{}_home'.format(dir_type)
base = getattr(basedirectory, xdg_attrib)
direct = os.path.join(base, 'fades')
if not os.path.exists(direct):
os.makedirs(direct)
return direct
|
[
"def",
"_get_specific_dir",
"(",
"dir_type",
")",
":",
"if",
"SNAP_BASEDIR_NAME",
"in",
"os",
".",
"environ",
":",
"logger",
".",
"debug",
"(",
"\"Getting base dir information from SNAP_BASEDIR_NAME env var.\"",
")",
"direct",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"SNAP_BASEDIR_NAME",
"]",
",",
"dir_type",
")",
"else",
":",
"try",
":",
"basedirectory",
"=",
"_get_basedirectory",
"(",
")",
"except",
"ImportError",
":",
"logger",
".",
"debug",
"(",
"\"Using last resort base dir: ~/.fades\"",
")",
"from",
"os",
".",
"path",
"import",
"expanduser",
"direct",
"=",
"os",
".",
"path",
".",
"join",
"(",
"expanduser",
"(",
"\"~\"",
")",
",",
"\".fades\"",
")",
"else",
":",
"xdg_attrib",
"=",
"'xdg_{}_home'",
".",
"format",
"(",
"dir_type",
")",
"base",
"=",
"getattr",
"(",
"basedirectory",
",",
"xdg_attrib",
")",
"direct",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"'fades'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"direct",
")",
":",
"os",
".",
"makedirs",
"(",
"direct",
")",
"return",
"direct"
] |
Get a specific directory, using some XDG base, with sensible default.
|
[
"Get",
"a",
"specific",
"directory",
"using",
"some",
"XDG",
"base",
"with",
"sensible",
"default",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L94-L113
|
train
|
PyAr/fades
|
fades/helpers.py
|
_get_interpreter_info
|
def _get_interpreter_info(interpreter=None):
"""Return the interpreter's full path using pythonX.Y format."""
if interpreter is None:
# If interpreter is None by default returns the current interpreter data.
major, minor = sys.version_info[:2]
executable = sys.executable
else:
args = [interpreter, '-c', SHOW_VERSION_CMD]
try:
requested_interpreter_info = logged_exec(args)
except Exception as error:
logger.error("Error getting requested interpreter version: %s", error)
raise FadesError("Could not get interpreter version")
requested_interpreter_info = json.loads(requested_interpreter_info[0])
executable = requested_interpreter_info['path']
major = requested_interpreter_info['major']
minor = requested_interpreter_info['minor']
if executable[-1].isdigit():
executable = executable.split(".")[0][:-1]
interpreter = "{}{}.{}".format(executable, major, minor)
return interpreter
|
python
|
def _get_interpreter_info(interpreter=None):
"""Return the interpreter's full path using pythonX.Y format."""
if interpreter is None:
# If interpreter is None by default returns the current interpreter data.
major, minor = sys.version_info[:2]
executable = sys.executable
else:
args = [interpreter, '-c', SHOW_VERSION_CMD]
try:
requested_interpreter_info = logged_exec(args)
except Exception as error:
logger.error("Error getting requested interpreter version: %s", error)
raise FadesError("Could not get interpreter version")
requested_interpreter_info = json.loads(requested_interpreter_info[0])
executable = requested_interpreter_info['path']
major = requested_interpreter_info['major']
minor = requested_interpreter_info['minor']
if executable[-1].isdigit():
executable = executable.split(".")[0][:-1]
interpreter = "{}{}.{}".format(executable, major, minor)
return interpreter
|
[
"def",
"_get_interpreter_info",
"(",
"interpreter",
"=",
"None",
")",
":",
"if",
"interpreter",
"is",
"None",
":",
"# If interpreter is None by default returns the current interpreter data.",
"major",
",",
"minor",
"=",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
"executable",
"=",
"sys",
".",
"executable",
"else",
":",
"args",
"=",
"[",
"interpreter",
",",
"'-c'",
",",
"SHOW_VERSION_CMD",
"]",
"try",
":",
"requested_interpreter_info",
"=",
"logged_exec",
"(",
"args",
")",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"Error getting requested interpreter version: %s\"",
",",
"error",
")",
"raise",
"FadesError",
"(",
"\"Could not get interpreter version\"",
")",
"requested_interpreter_info",
"=",
"json",
".",
"loads",
"(",
"requested_interpreter_info",
"[",
"0",
"]",
")",
"executable",
"=",
"requested_interpreter_info",
"[",
"'path'",
"]",
"major",
"=",
"requested_interpreter_info",
"[",
"'major'",
"]",
"minor",
"=",
"requested_interpreter_info",
"[",
"'minor'",
"]",
"if",
"executable",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"executable",
"=",
"executable",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"[",
":",
"-",
"1",
"]",
"interpreter",
"=",
"\"{}{}.{}\"",
".",
"format",
"(",
"executable",
",",
"major",
",",
"minor",
")",
"return",
"interpreter"
] |
Return the interpreter's full path using pythonX.Y format.
|
[
"Return",
"the",
"interpreter",
"s",
"full",
"path",
"using",
"pythonX",
".",
"Y",
"format",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L126-L146
|
train
|
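The tail of `_get_interpreter_info` normalizes an executable path that already ends in a version digit; a small illustration of that string handling (the example path is hypothetical):

```python
executable, major, minor = "/usr/bin/python3.9", 3, 9  # hypothetical values

# same normalization as in _get_interpreter_info: drop the trailing version suffix
if executable[-1].isdigit():
    executable = executable.split(".")[0][:-1]  # '/usr/bin/python'

print("{}{}.{}".format(executable, major, minor))  # /usr/bin/python3.9
```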
PyAr/fades
|
fades/helpers.py
|
get_interpreter_version
|
def get_interpreter_version(requested_interpreter):
"""Return a 'sanitized' interpreter and indicates if it is the current one."""
logger.debug('Getting interpreter version for: %s', requested_interpreter)
current_interpreter = _get_interpreter_info()
logger.debug('Current interpreter is %s', current_interpreter)
if requested_interpreter is None:
return(current_interpreter, True)
else:
requested_interpreter = _get_interpreter_info(requested_interpreter)
is_current = requested_interpreter == current_interpreter
logger.debug('Interpreter=%s. It is the same as fades?=%s',
requested_interpreter, is_current)
return (requested_interpreter, is_current)
|
python
|
def get_interpreter_version(requested_interpreter):
"""Return a 'sanitized' interpreter and indicates if it is the current one."""
logger.debug('Getting interpreter version for: %s', requested_interpreter)
current_interpreter = _get_interpreter_info()
logger.debug('Current interpreter is %s', current_interpreter)
if requested_interpreter is None:
return(current_interpreter, True)
else:
requested_interpreter = _get_interpreter_info(requested_interpreter)
is_current = requested_interpreter == current_interpreter
logger.debug('Interpreter=%s. It is the same as fades?=%s',
requested_interpreter, is_current)
return (requested_interpreter, is_current)
|
[
"def",
"get_interpreter_version",
"(",
"requested_interpreter",
")",
":",
"logger",
".",
"debug",
"(",
"'Getting interpreter version for: %s'",
",",
"requested_interpreter",
")",
"current_interpreter",
"=",
"_get_interpreter_info",
"(",
")",
"logger",
".",
"debug",
"(",
"'Current interpreter is %s'",
",",
"current_interpreter",
")",
"if",
"requested_interpreter",
"is",
"None",
":",
"return",
"(",
"current_interpreter",
",",
"True",
")",
"else",
":",
"requested_interpreter",
"=",
"_get_interpreter_info",
"(",
"requested_interpreter",
")",
"is_current",
"=",
"requested_interpreter",
"==",
"current_interpreter",
"logger",
".",
"debug",
"(",
"'Interpreter=%s. It is the same as fades?=%s'",
",",
"requested_interpreter",
",",
"is_current",
")",
"return",
"(",
"requested_interpreter",
",",
"is_current",
")"
] |
Return a 'sanitized' interpreter and indicates if it is the current one.
|
[
"Return",
"a",
"sanitized",
"interpreter",
"and",
"indicates",
"if",
"it",
"is",
"the",
"current",
"one",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L149-L161
|
train
|
PyAr/fades
|
fades/helpers.py
|
check_pypi_updates
|
def check_pypi_updates(dependencies):
"""Return a list of dependencies to upgrade."""
dependencies_up_to_date = []
for dependency in dependencies.get('pypi', []):
# get latest version from PyPI api
try:
latest_version = get_latest_version_number(dependency.project_name)
except Exception as error:
logger.warning("--check-updates command will be aborted. Error: %s", error)
return dependencies
# get required version
required_version = None
if dependency.specs:
_, required_version = dependency.specs[0]
if required_version:
dependencies_up_to_date.append(dependency)
if latest_version > required_version:
logger.info("There is a new version of %s: %s",
dependency.project_name, latest_version)
elif latest_version < required_version:
logger.warning("The requested version for %s is greater "
"than latest found in PyPI: %s",
dependency.project_name, latest_version)
else:
logger.info("The requested version for %s is the latest one in PyPI: %s",
dependency.project_name, latest_version)
else:
project_name_plus = "{}=={}".format(dependency.project_name, latest_version)
dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus))
logger.info("The latest version of %r is %s and will use it.",
dependency.project_name, latest_version)
dependencies["pypi"] = dependencies_up_to_date
return dependencies
|
python
|
def check_pypi_updates(dependencies):
"""Return a list of dependencies to upgrade."""
dependencies_up_to_date = []
for dependency in dependencies.get('pypi', []):
# get latest version from PyPI api
try:
latest_version = get_latest_version_number(dependency.project_name)
except Exception as error:
logger.warning("--check-updates command will be aborted. Error: %s", error)
return dependencies
# get required version
required_version = None
if dependency.specs:
_, required_version = dependency.specs[0]
if required_version:
dependencies_up_to_date.append(dependency)
if latest_version > required_version:
logger.info("There is a new version of %s: %s",
dependency.project_name, latest_version)
elif latest_version < required_version:
logger.warning("The requested version for %s is greater "
"than latest found in PyPI: %s",
dependency.project_name, latest_version)
else:
logger.info("The requested version for %s is the latest one in PyPI: %s",
dependency.project_name, latest_version)
else:
project_name_plus = "{}=={}".format(dependency.project_name, latest_version)
dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus))
logger.info("The latest version of %r is %s and will use it.",
dependency.project_name, latest_version)
dependencies["pypi"] = dependencies_up_to_date
return dependencies
|
[
"def",
"check_pypi_updates",
"(",
"dependencies",
")",
":",
"dependencies_up_to_date",
"=",
"[",
"]",
"for",
"dependency",
"in",
"dependencies",
".",
"get",
"(",
"'pypi'",
",",
"[",
"]",
")",
":",
"# get latest version from PyPI api",
"try",
":",
"latest_version",
"=",
"get_latest_version_number",
"(",
"dependency",
".",
"project_name",
")",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"warning",
"(",
"\"--check-updates command will be aborted. Error: %s\"",
",",
"error",
")",
"return",
"dependencies",
"# get required version",
"required_version",
"=",
"None",
"if",
"dependency",
".",
"specs",
":",
"_",
",",
"required_version",
"=",
"dependency",
".",
"specs",
"[",
"0",
"]",
"if",
"required_version",
":",
"dependencies_up_to_date",
".",
"append",
"(",
"dependency",
")",
"if",
"latest_version",
">",
"required_version",
":",
"logger",
".",
"info",
"(",
"\"There is a new version of %s: %s\"",
",",
"dependency",
".",
"project_name",
",",
"latest_version",
")",
"elif",
"latest_version",
"<",
"required_version",
":",
"logger",
".",
"warning",
"(",
"\"The requested version for %s is greater \"",
"\"than latest found in PyPI: %s\"",
",",
"dependency",
".",
"project_name",
",",
"latest_version",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"The requested version for %s is the latest one in PyPI: %s\"",
",",
"dependency",
".",
"project_name",
",",
"latest_version",
")",
"else",
":",
"project_name_plus",
"=",
"\"{}=={}\"",
".",
"format",
"(",
"dependency",
".",
"project_name",
",",
"latest_version",
")",
"dependencies_up_to_date",
".",
"append",
"(",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"project_name_plus",
")",
")",
"logger",
".",
"info",
"(",
"\"The latest version of %r is %s and will use it.\"",
",",
"dependency",
".",
"project_name",
",",
"latest_version",
")",
"dependencies",
"[",
"\"pypi\"",
"]",
"=",
"dependencies_up_to_date",
"return",
"dependencies"
] |
Return a list of dependencies to upgrade.
|
[
"Return",
"a",
"list",
"of",
"dependencies",
"to",
"upgrade",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L180-L214
|
train
|
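`check_pypi_updates` reads `project_name` and `specs` from `pkg_resources` requirement objects; a quick sketch of those attributes using a hypothetical requirement string:

```python
import pkg_resources

# hypothetical requirement, only to show the attributes check_pypi_updates uses
req = pkg_resources.Requirement.parse("requests==2.31.0")

print(req.project_name)  # 'requests'
print(req.specs)         # [('==', '2.31.0')]
```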
PyAr/fades
|
fades/helpers.py
|
_pypi_head_package
|
def _pypi_head_package(dependency):
"""Hit pypi with a http HEAD to check if pkg_name exists."""
if dependency.specs:
_, version = dependency.specs[0]
url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
else:
url = BASE_PYPI_URL.format(name=dependency.project_name)
logger.debug("Doing HEAD requests against %s", url)
req = request.Request(url, method='HEAD')
try:
response = request.urlopen(req)
except HTTPError as http_error:
if http_error.code == HTTP_STATUS_NOT_FOUND:
return False
else:
raise
if response.status == HTTP_STATUS_OK:
logger.debug("%r exists in PyPI.", dependency)
return True
else:
# Maybe we are getting somethink like a redirect. In this case we are only
# warning to the user and trying to install the dependency.
# In the worst scenery fades will fail to install it.
logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists",
response.status, response.reason, dependency)
return True
|
python
|
def _pypi_head_package(dependency):
"""Hit pypi with a http HEAD to check if pkg_name exists."""
if dependency.specs:
_, version = dependency.specs[0]
url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
else:
url = BASE_PYPI_URL.format(name=dependency.project_name)
logger.debug("Doing HEAD requests against %s", url)
req = request.Request(url, method='HEAD')
try:
response = request.urlopen(req)
except HTTPError as http_error:
if http_error.code == HTTP_STATUS_NOT_FOUND:
return False
else:
raise
if response.status == HTTP_STATUS_OK:
logger.debug("%r exists in PyPI.", dependency)
return True
else:
# Maybe we are getting somethink like a redirect. In this case we are only
# warning to the user and trying to install the dependency.
# In the worst scenery fades will fail to install it.
logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists",
response.status, response.reason, dependency)
return True
|
[
"def",
"_pypi_head_package",
"(",
"dependency",
")",
":",
"if",
"dependency",
".",
"specs",
":",
"_",
",",
"version",
"=",
"dependency",
".",
"specs",
"[",
"0",
"]",
"url",
"=",
"BASE_PYPI_URL_WITH_VERSION",
".",
"format",
"(",
"name",
"=",
"dependency",
".",
"project_name",
",",
"version",
"=",
"version",
")",
"else",
":",
"url",
"=",
"BASE_PYPI_URL",
".",
"format",
"(",
"name",
"=",
"dependency",
".",
"project_name",
")",
"logger",
".",
"debug",
"(",
"\"Doing HEAD requests against %s\"",
",",
"url",
")",
"req",
"=",
"request",
".",
"Request",
"(",
"url",
",",
"method",
"=",
"'HEAD'",
")",
"try",
":",
"response",
"=",
"request",
".",
"urlopen",
"(",
"req",
")",
"except",
"HTTPError",
"as",
"http_error",
":",
"if",
"http_error",
".",
"code",
"==",
"HTTP_STATUS_NOT_FOUND",
":",
"return",
"False",
"else",
":",
"raise",
"if",
"response",
".",
"status",
"==",
"HTTP_STATUS_OK",
":",
"logger",
".",
"debug",
"(",
"\"%r exists in PyPI.\"",
",",
"dependency",
")",
"return",
"True",
"else",
":",
"# Maybe we are getting somethink like a redirect. In this case we are only",
"# warning to the user and trying to install the dependency.",
"# In the worst scenery fades will fail to install it.",
"logger",
".",
"warning",
"(",
"\"Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists\"",
",",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"dependency",
")",
"return",
"True"
] |
Hit pypi with a http HEAD to check if pkg_name exists.
|
[
"Hit",
"pypi",
"with",
"a",
"http",
"HEAD",
"to",
"check",
"if",
"pkg_name",
"exists",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L217-L242
|
train
|
PyAr/fades
|
fades/helpers.py
|
check_pypi_exists
|
def check_pypi_exists(dependencies):
"""Check if the indicated dependencies actually exists in pypi."""
for dependency in dependencies.get('pypi', []):
logger.debug("Checking if %r exists in PyPI", dependency)
try:
exists = _pypi_head_package(dependency)
except Exception as error:
logger.error("Error checking %s in PyPI: %r", dependency, error)
raise FadesError("Could not check if dependency exists in PyPI")
else:
if not exists:
logger.error("%s doesn't exists in PyPI.", dependency)
return False
return True
|
python
|
def check_pypi_exists(dependencies):
"""Check if the indicated dependencies actually exists in pypi."""
for dependency in dependencies.get('pypi', []):
logger.debug("Checking if %r exists in PyPI", dependency)
try:
exists = _pypi_head_package(dependency)
except Exception as error:
logger.error("Error checking %s in PyPI: %r", dependency, error)
raise FadesError("Could not check if dependency exists in PyPI")
else:
if not exists:
logger.error("%s doesn't exists in PyPI.", dependency)
return False
return True
|
[
"def",
"check_pypi_exists",
"(",
"dependencies",
")",
":",
"for",
"dependency",
"in",
"dependencies",
".",
"get",
"(",
"'pypi'",
",",
"[",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"\"Checking if %r exists in PyPI\"",
",",
"dependency",
")",
"try",
":",
"exists",
"=",
"_pypi_head_package",
"(",
"dependency",
")",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"\"Error checking %s in PyPI: %r\"",
",",
"dependency",
",",
"error",
")",
"raise",
"FadesError",
"(",
"\"Could not check if dependency exists in PyPI\"",
")",
"else",
":",
"if",
"not",
"exists",
":",
"logger",
".",
"error",
"(",
"\"%s doesn't exists in PyPI.\"",
",",
"dependency",
")",
"return",
"False",
"return",
"True"
] |
Check if the indicated dependencies actually exists in pypi.
|
[
"Check",
"if",
"the",
"indicated",
"dependencies",
"actually",
"exists",
"in",
"pypi",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L245-L258
|
train
|
PyAr/fades
|
fades/helpers.py
|
download_remote_script
|
def download_remote_script(url):
"""Download the content of a remote script to a local temp file."""
temp_fh = tempfile.NamedTemporaryFile('wt', encoding='utf8', suffix=".py", delete=False)
downloader = _ScriptDownloader(url)
logger.info(
"Downloading remote script from %r using (%r downloader) to %r",
url, downloader.name, temp_fh.name)
content = downloader.get()
temp_fh.write(content)
temp_fh.close()
return temp_fh.name
|
python
|
def download_remote_script(url):
"""Download the content of a remote script to a local temp file."""
temp_fh = tempfile.NamedTemporaryFile('wt', encoding='utf8', suffix=".py", delete=False)
downloader = _ScriptDownloader(url)
logger.info(
"Downloading remote script from %r using (%r downloader) to %r",
url, downloader.name, temp_fh.name)
content = downloader.get()
temp_fh.write(content)
temp_fh.close()
return temp_fh.name
|
[
"def",
"download_remote_script",
"(",
"url",
")",
":",
"temp_fh",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'wt'",
",",
"encoding",
"=",
"'utf8'",
",",
"suffix",
"=",
"\".py\"",
",",
"delete",
"=",
"False",
")",
"downloader",
"=",
"_ScriptDownloader",
"(",
"url",
")",
"logger",
".",
"info",
"(",
"\"Downloading remote script from %r using (%r downloader) to %r\"",
",",
"url",
",",
"downloader",
".",
"name",
",",
"temp_fh",
".",
"name",
")",
"content",
"=",
"downloader",
".",
"get",
"(",
")",
"temp_fh",
".",
"write",
"(",
"content",
")",
"temp_fh",
".",
"close",
"(",
")",
"return",
"temp_fh",
".",
"name"
] |
Download the content of a remote script to a local temp file.
|
[
"Download",
"the",
"content",
"of",
"a",
"remote",
"script",
"to",
"a",
"local",
"temp",
"file",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L334-L345
|
train
|
PyAr/fades
|
fades/helpers.py
|
ExecutionError.dump_to_log
|
def dump_to_log(self, logger):
"""Send the cmd info and collected stdout to logger."""
logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd)
for line in self._collected_stdout:
logger.error(STDOUT_LOG_PREFIX + line)
|
python
|
def dump_to_log(self, logger):
"""Send the cmd info and collected stdout to logger."""
logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd)
for line in self._collected_stdout:
logger.error(STDOUT_LOG_PREFIX + line)
|
[
"def",
"dump_to_log",
"(",
"self",
",",
"logger",
")",
":",
"logger",
".",
"error",
"(",
"\"Execution ended in %s for cmd %s\"",
",",
"self",
".",
"_retcode",
",",
"self",
".",
"_cmd",
")",
"for",
"line",
"in",
"self",
".",
"_collected_stdout",
":",
"logger",
".",
"error",
"(",
"STDOUT_LOG_PREFIX",
"+",
"line",
")"
] |
Send the cmd info and collected stdout to logger.
|
[
"Send",
"the",
"cmd",
"info",
"and",
"collected",
"stdout",
"to",
"logger",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L65-L69
|
train
|
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader._decide
|
def _decide(self):
"""Find out which method should be applied to download that URL."""
netloc = parse.urlparse(self.url).netloc
name = self.NETLOCS.get(netloc, 'raw')
return name
|
python
|
def _decide(self):
"""Find out which method should be applied to download that URL."""
netloc = parse.urlparse(self.url).netloc
name = self.NETLOCS.get(netloc, 'raw')
return name
|
[
"def",
"_decide",
"(",
"self",
")",
":",
"netloc",
"=",
"parse",
".",
"urlparse",
"(",
"self",
".",
"url",
")",
".",
"netloc",
"name",
"=",
"self",
".",
"NETLOCS",
".",
"get",
"(",
"netloc",
",",
"'raw'",
")",
"return",
"name"
] |
Find out which method should be applied to download that URL.
|
[
"Find",
"out",
"which",
"method",
"should",
"be",
"applied",
"to",
"download",
"that",
"URL",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L287-L291
|
train
|
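`_decide` dispatches on the URL's network location; a standalone sketch of that lookup with an assumed `NETLOCS` mapping (the real mapping lives on `_ScriptDownloader` and is not shown in this excerpt):

```python
from urllib import parse

# assumed hostname-to-downloader mapping; illustrative only
NETLOCS = {
    "linkode.org": "linkode",
    "pastebin.com": "pastebin",
    "gist.github.com": "gist",
}

url = "https://gist.github.com/someuser/abc123"  # hypothetical URL
netloc = parse.urlparse(url).netloc
print(NETLOCS.get(netloc, "raw"))  # 'gist'; unknown hosts fall back to 'raw'
```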
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader.get
|
def get(self):
"""Get the script content from the URL using the decided downloader."""
method_name = "_download_" + self.name
method = getattr(self, method_name)
return method()
|
python
|
def get(self):
"""Get the script content from the URL using the decided downloader."""
method_name = "_download_" + self.name
method = getattr(self, method_name)
return method()
|
[
"def",
"get",
"(",
"self",
")",
":",
"method_name",
"=",
"\"_download_\"",
"+",
"self",
".",
"name",
"method",
"=",
"getattr",
"(",
"self",
",",
"method_name",
")",
"return",
"method",
"(",
")"
] |
Get the script content from the URL using the decided downloader.
|
[
"Get",
"the",
"script",
"content",
"from",
"the",
"URL",
"using",
"the",
"decided",
"downloader",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L293-L297
|
train
|
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader._download_raw
|
def _download_raw(self, url=None):
"""Download content from URL directly."""
if url is None:
url = self.url
req = request.Request(url, headers=self.HEADERS_PLAIN)
return request.urlopen(req).read().decode("utf8")
|
python
|
def _download_raw(self, url=None):
"""Download content from URL directly."""
if url is None:
url = self.url
req = request.Request(url, headers=self.HEADERS_PLAIN)
return request.urlopen(req).read().decode("utf8")
|
[
"def",
"_download_raw",
"(",
"self",
",",
"url",
"=",
"None",
")",
":",
"if",
"url",
"is",
"None",
":",
"url",
"=",
"self",
".",
"url",
"req",
"=",
"request",
".",
"Request",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"HEADERS_PLAIN",
")",
"return",
"request",
".",
"urlopen",
"(",
"req",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf8\"",
")"
] |
Download content from URL directly.
|
[
"Download",
"content",
"from",
"URL",
"directly",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L299-L304
|
train
|
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader._download_linkode
|
def _download_linkode(self):
"""Download content from Linkode pastebin."""
# build the API url
linkode_id = self.url.split("/")[-1]
if linkode_id.startswith("#"):
linkode_id = linkode_id[1:]
url = "https://linkode.org/api/1/linkodes/" + linkode_id
req = request.Request(url, headers=self.HEADERS_JSON)
resp = request.urlopen(req)
raw = resp.read()
data = json.loads(raw.decode("utf8"))
content = data['content']
return content
|
python
|
def _download_linkode(self):
"""Download content from Linkode pastebin."""
# build the API url
linkode_id = self.url.split("/")[-1]
if linkode_id.startswith("#"):
linkode_id = linkode_id[1:]
url = "https://linkode.org/api/1/linkodes/" + linkode_id
req = request.Request(url, headers=self.HEADERS_JSON)
resp = request.urlopen(req)
raw = resp.read()
data = json.loads(raw.decode("utf8"))
content = data['content']
return content
|
[
"def",
"_download_linkode",
"(",
"self",
")",
":",
"# build the API url",
"linkode_id",
"=",
"self",
".",
"url",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"if",
"linkode_id",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"linkode_id",
"=",
"linkode_id",
"[",
"1",
":",
"]",
"url",
"=",
"\"https://linkode.org/api/1/linkodes/\"",
"+",
"linkode_id",
"req",
"=",
"request",
".",
"Request",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"HEADERS_JSON",
")",
"resp",
"=",
"request",
".",
"urlopen",
"(",
"req",
")",
"raw",
"=",
"resp",
".",
"read",
"(",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"raw",
".",
"decode",
"(",
"\"utf8\"",
")",
")",
"content",
"=",
"data",
"[",
"'content'",
"]",
"return",
"content"
] |
Download content from Linkode pastebin.
|
[
"Download",
"content",
"from",
"Linkode",
"pastebin",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L306-L319
|
train
|
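A worked example of the URL manipulation in `_download_linkode`, using a made-up linkode ID:

```python
url = "https://linkode.org/#4kzBlB"  # hypothetical paste URL

linkode_id = url.split("/")[-1]      # '#4kzBlB'
if linkode_id.startswith("#"):
    linkode_id = linkode_id[1:]      # '4kzBlB'

print("https://linkode.org/api/1/linkodes/" + linkode_id)
# https://linkode.org/api/1/linkodes/4kzBlB
```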
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader._download_pastebin
|
def _download_pastebin(self):
"""Download content from Pastebin itself."""
paste_id = self.url.split("/")[-1]
url = "https://pastebin.com/raw/" + paste_id
return self._download_raw(url)
|
python
|
def _download_pastebin(self):
"""Download content from Pastebin itself."""
paste_id = self.url.split("/")[-1]
url = "https://pastebin.com/raw/" + paste_id
return self._download_raw(url)
|
[
"def",
"_download_pastebin",
"(",
"self",
")",
":",
"paste_id",
"=",
"self",
".",
"url",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"url",
"=",
"\"https://pastebin.com/raw/\"",
"+",
"paste_id",
"return",
"self",
".",
"_download_raw",
"(",
"url",
")"
] |
Download content from Pastebin itself.
|
[
"Download",
"content",
"from",
"Pastebin",
"itself",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L321-L325
|
train
|
PyAr/fades
|
fades/helpers.py
|
_ScriptDownloader._download_gist
|
def _download_gist(self):
"""Download content from github's pastebin."""
parts = parse.urlparse(self.url)
url = "https://gist.github.com" + parts.path + "/raw"
return self._download_raw(url)
|
python
|
def _download_gist(self):
"""Download content from github's pastebin."""
parts = parse.urlparse(self.url)
url = "https://gist.github.com" + parts.path + "/raw"
return self._download_raw(url)
|
[
"def",
"_download_gist",
"(",
"self",
")",
":",
"parts",
"=",
"parse",
".",
"urlparse",
"(",
"self",
".",
"url",
")",
"url",
"=",
"\"https://gist.github.com\"",
"+",
"parts",
".",
"path",
"+",
"\"/raw\"",
"return",
"self",
".",
"_download_raw",
"(",
"url",
")"
] |
Download content from github's pastebin.
|
[
"Download",
"content",
"from",
"github",
"s",
"pastebin",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L327-L331
|
train
|
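Similarly, a worked example of how `_download_gist` derives the raw URL (the gist path is made up):

```python
from urllib import parse

url = "https://gist.github.com/someuser/1a2b3c4d"  # hypothetical gist URL
parts = parse.urlparse(url)          # parts.path == '/someuser/1a2b3c4d'

print("https://gist.github.com" + parts.path + "/raw")
# https://gist.github.com/someuser/1a2b3c4d/raw
```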
PyAr/fades
|
setup.py
|
get_version
|
def get_version():
"""Retrieves package version from the file."""
with open('fades/_version.py') as fh:
m = re.search("\(([^']*)\)", fh.read())
if m is None:
raise ValueError("Unrecognized version in 'fades/_version.py'")
return m.groups()[0].replace(', ', '.')
|
python
|
def get_version():
"""Retrieves package version from the file."""
with open('fades/_version.py') as fh:
m = re.search("\(([^']*)\)", fh.read())
if m is None:
raise ValueError("Unrecognized version in 'fades/_version.py'")
return m.groups()[0].replace(', ', '.')
|
[
"def",
"get_version",
"(",
")",
":",
"with",
"open",
"(",
"'fades/_version.py'",
")",
"as",
"fh",
":",
"m",
"=",
"re",
".",
"search",
"(",
"\"\\(([^']*)\\)\"",
",",
"fh",
".",
"read",
"(",
")",
")",
"if",
"m",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized version in 'fades/_version.py'\"",
")",
"return",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
".",
"replace",
"(",
"', '",
",",
"'.'",
")"
] |
Retrieves package version from the file.
|
[
"Retrieves",
"package",
"version",
"from",
"the",
"file",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/setup.py#L53-L59
|
train
|
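The regular expression in `get_version` expects the version to be written as a tuple inside `fades/_version.py`; a small sketch of what it extracts, assuming a line such as `__version__ = (9, 0, 1)`:

```python
import re

# assumed content of fades/_version.py, only for illustration
content = "__version__ = (9, 0, 1)\n"

m = re.search(r"\(([^']*)\)", content)
print(m.groups()[0])                     # '9, 0, 1'
print(m.groups()[0].replace(', ', '.'))  # '9.0.1'
```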
PyAr/fades
|
setup.py
|
CustomInstall.initialize_options
|
def initialize_options(self):
"""Run parent initialization and then fix the scripts var."""
install.initialize_options(self)
# leave the proper script according to the platform
script = SCRIPT_WIN if sys.platform == "win32" else SCRIPT_REST
self.distribution.scripts = [script]
|
python
|
def initialize_options(self):
"""Run parent initialization and then fix the scripts var."""
install.initialize_options(self)
# leave the proper script according to the platform
script = SCRIPT_WIN if sys.platform == "win32" else SCRIPT_REST
self.distribution.scripts = [script]
|
[
"def",
"initialize_options",
"(",
"self",
")",
":",
"install",
".",
"initialize_options",
"(",
"self",
")",
"# leave the proper script according to the platform",
"script",
"=",
"SCRIPT_WIN",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
"else",
"SCRIPT_REST",
"self",
".",
"distribution",
".",
"scripts",
"=",
"[",
"script",
"]"
] |
Run parent initialization and then fix the scripts var.
|
[
"Run",
"parent",
"initialization",
"and",
"then",
"fix",
"the",
"scripts",
"var",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/setup.py#L70-L76
|
train
|
PyAr/fades
|
setup.py
|
CustomInstall.run
|
def run(self):
"""Run parent install, and then save the man file."""
install.run(self)
# man directory
if self._custom_man_dir is not None:
if not os.path.exists(self._custom_man_dir):
os.makedirs(self._custom_man_dir)
shutil.copy("man/fades.1", self._custom_man_dir)
|
python
|
def run(self):
"""Run parent install, and then save the man file."""
install.run(self)
# man directory
if self._custom_man_dir is not None:
if not os.path.exists(self._custom_man_dir):
os.makedirs(self._custom_man_dir)
shutil.copy("man/fades.1", self._custom_man_dir)
|
[
"def",
"run",
"(",
"self",
")",
":",
"install",
".",
"run",
"(",
"self",
")",
"# man directory",
"if",
"self",
".",
"_custom_man_dir",
"is",
"not",
"None",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_custom_man_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"_custom_man_dir",
")",
"shutil",
".",
"copy",
"(",
"\"man/fades.1\"",
",",
"self",
".",
"_custom_man_dir",
")"
] |
Run parent install, and then save the man file.
|
[
"Run",
"parent",
"install",
"and",
"then",
"save",
"the",
"man",
"file",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/setup.py#L78-L86
|
train
|
PyAr/fades
|
setup.py
|
CustomInstall.finalize_options
|
def finalize_options(self):
"""Alter the installation path."""
install.finalize_options(self)
if self.prefix is None:
# no place for man page (like in a 'snap')
man_dir = None
else:
man_dir = os.path.join(self.prefix, "share", "man", "man1")
# if we have 'root', put the building path also under it (used normally
# by pbuilder)
if self.root is not None:
man_dir = os.path.join(self.root, man_dir[1:])
self._custom_man_dir = man_dir
|
python
|
def finalize_options(self):
"""Alter the installation path."""
install.finalize_options(self)
if self.prefix is None:
# no place for man page (like in a 'snap')
man_dir = None
else:
man_dir = os.path.join(self.prefix, "share", "man", "man1")
# if we have 'root', put the building path also under it (used normally
# by pbuilder)
if self.root is not None:
man_dir = os.path.join(self.root, man_dir[1:])
self._custom_man_dir = man_dir
|
[
"def",
"finalize_options",
"(",
"self",
")",
":",
"install",
".",
"finalize_options",
"(",
"self",
")",
"if",
"self",
".",
"prefix",
"is",
"None",
":",
"# no place for man page (like in a 'snap')",
"man_dir",
"=",
"None",
"else",
":",
"man_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"prefix",
",",
"\"share\"",
",",
"\"man\"",
",",
"\"man1\"",
")",
"# if we have 'root', put the building path also under it (used normally",
"# by pbuilder)",
"if",
"self",
".",
"root",
"is",
"not",
"None",
":",
"man_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"root",
",",
"man_dir",
"[",
"1",
":",
"]",
")",
"self",
".",
"_custom_man_dir",
"=",
"man_dir"
] |
Alter the installation path.
|
[
"Alter",
"the",
"installation",
"path",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/setup.py#L88-L101
|
train
|
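The `man_dir[1:]` slice in `finalize_options` exists because `os.path.join` discards everything before an absolute component; a short illustration with hypothetical paths:

```python
import os

root = "/tmp/build-root"         # hypothetical pbuilder root
man_dir = "/usr/share/man/man1"  # hypothetical prefix-based man dir

print(os.path.join(root, man_dir))       # /usr/share/man/man1  (root is discarded)
print(os.path.join(root, man_dir[1:]))   # /tmp/build-root/usr/share/man/man1
```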
PyAr/fades
|
fades/file_options.py
|
options_from_file
|
def options_from_file(args):
"""Get a argparse.Namespace and return it updated with options from config files.
Config files will be parsed with priority equal to his order in CONFIG_FILES.
"""
logger.debug("updating options from config files")
updated_from_file = []
for config_file in CONFIG_FILES:
logger.debug("updating from: %s", config_file)
parser = ConfigParser()
parser.read(config_file)
try:
items = parser.items('fades')
except NoSectionError:
continue
for config_key, config_value in items:
if config_value in ['true', 'false']:
config_value = config_value == 'true'
if config_key in MERGEABLE_CONFIGS:
current_value = getattr(args, config_key, [])
if current_value is None:
current_value = []
current_value.append(config_value)
setattr(args, config_key, current_value)
if not getattr(args, config_key, False) or config_key in updated_from_file:
# By default all 'store-true' arguments are False. So we only
# override them if they are False. If they are True means that the
# user is setting those on the CLI.
setattr(args, config_key, config_value)
updated_from_file.append(config_key)
logger.debug("updating %s to %s from file settings", config_key, config_value)
return args
|
python
|
def options_from_file(args):
"""Get a argparse.Namespace and return it updated with options from config files.
Config files will be parsed with priority equal to his order in CONFIG_FILES.
"""
logger.debug("updating options from config files")
updated_from_file = []
for config_file in CONFIG_FILES:
logger.debug("updating from: %s", config_file)
parser = ConfigParser()
parser.read(config_file)
try:
items = parser.items('fades')
except NoSectionError:
continue
for config_key, config_value in items:
if config_value in ['true', 'false']:
config_value = config_value == 'true'
if config_key in MERGEABLE_CONFIGS:
current_value = getattr(args, config_key, [])
if current_value is None:
current_value = []
current_value.append(config_value)
setattr(args, config_key, current_value)
if not getattr(args, config_key, False) or config_key in updated_from_file:
# By default all 'store-true' arguments are False. So we only
# override them if they are False. If they are True means that the
# user is setting those on the CLI.
setattr(args, config_key, config_value)
updated_from_file.append(config_key)
logger.debug("updating %s to %s from file settings", config_key, config_value)
return args
|
[
"def",
"options_from_file",
"(",
"args",
")",
":",
"logger",
".",
"debug",
"(",
"\"updating options from config files\"",
")",
"updated_from_file",
"=",
"[",
"]",
"for",
"config_file",
"in",
"CONFIG_FILES",
":",
"logger",
".",
"debug",
"(",
"\"updating from: %s\"",
",",
"config_file",
")",
"parser",
"=",
"ConfigParser",
"(",
")",
"parser",
".",
"read",
"(",
"config_file",
")",
"try",
":",
"items",
"=",
"parser",
".",
"items",
"(",
"'fades'",
")",
"except",
"NoSectionError",
":",
"continue",
"for",
"config_key",
",",
"config_value",
"in",
"items",
":",
"if",
"config_value",
"in",
"[",
"'true'",
",",
"'false'",
"]",
":",
"config_value",
"=",
"config_value",
"==",
"'true'",
"if",
"config_key",
"in",
"MERGEABLE_CONFIGS",
":",
"current_value",
"=",
"getattr",
"(",
"args",
",",
"config_key",
",",
"[",
"]",
")",
"if",
"current_value",
"is",
"None",
":",
"current_value",
"=",
"[",
"]",
"current_value",
".",
"append",
"(",
"config_value",
")",
"setattr",
"(",
"args",
",",
"config_key",
",",
"current_value",
")",
"if",
"not",
"getattr",
"(",
"args",
",",
"config_key",
",",
"False",
")",
"or",
"config_key",
"in",
"updated_from_file",
":",
"# By default all 'store-true' arguments are False. So we only",
"# override them if they are False. If they are True means that the",
"# user is setting those on the CLI.",
"setattr",
"(",
"args",
",",
"config_key",
",",
"config_value",
")",
"updated_from_file",
".",
"append",
"(",
"config_key",
")",
"logger",
".",
"debug",
"(",
"\"updating %s to %s from file settings\"",
",",
"config_key",
",",
"config_value",
")",
"return",
"args"
] |
Get a argparse.Namespace and return it updated with options from config files.
Config files will be parsed with priority equal to his order in CONFIG_FILES.
|
[
"Get",
"a",
"argparse",
".",
"Namespace",
"and",
"return",
"it",
"updated",
"with",
"options",
"from",
"config",
"files",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/file_options.py#L33-L66
|
train
|
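`options_from_file` reads a `[fades]` section with `ConfigParser`; a minimal sketch of the parsing step it performs, using an invented config snippet:

```python
from configparser import ConfigParser

parser = ConfigParser()
# invented config content; in fades this would come from a file in CONFIG_FILES
parser.read_string("[fades]\ndependency = requests\nvirtualenv = true\n")

for config_key, config_value in parser.items("fades"):
    if config_value in ["true", "false"]:
        config_value = config_value == "true"
    print(config_key, config_value)
# dependency requests
# virtualenv True
```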
PyAr/fades
|
fades/cache.py
|
VEnvsCache._venv_match
|
def _venv_match(self, installed, requirements):
"""Return True if what is installed satisfies the requirements.
This method has multiple exit-points, but only for False (because
if *anything* is not satisified, the venv is no good). Only after
all was checked, and it didn't exit, the venv is ok so return True.
"""
if not requirements:
# special case for no requirements, where we can't actually
# check anything: the venv is useful if nothing installed too
return None if installed else []
satisfying_deps = []
for repo, req_deps in requirements.items():
useful_inst = set()
if repo not in installed:
# the venv doesn't even have the repo
return None
if repo == REPO_VCS:
inst_deps = {VCSDependency(url) for url in installed[repo].keys()}
else:
inst_deps = {Distribution(project_name=dep, version=ver)
for (dep, ver) in installed[repo].items()}
for req in req_deps:
for inst in inst_deps:
if inst in req:
useful_inst.add(inst)
break
else:
# nothing installed satisfied that requirement
return None
# assure *all* that is installed is useful for the requirements
if useful_inst == inst_deps:
satisfying_deps.extend(inst_deps)
else:
return None
# it did it through!
return satisfying_deps
|
python
|
def _venv_match(self, installed, requirements):
"""Return True if what is installed satisfies the requirements.
This method has multiple exit-points, but only for False (because
if *anything* is not satisified, the venv is no good). Only after
all was checked, and it didn't exit, the venv is ok so return True.
"""
if not requirements:
# special case for no requirements, where we can't actually
# check anything: the venv is useful if nothing installed too
return None if installed else []
satisfying_deps = []
for repo, req_deps in requirements.items():
useful_inst = set()
if repo not in installed:
# the venv doesn't even have the repo
return None
if repo == REPO_VCS:
inst_deps = {VCSDependency(url) for url in installed[repo].keys()}
else:
inst_deps = {Distribution(project_name=dep, version=ver)
for (dep, ver) in installed[repo].items()}
for req in req_deps:
for inst in inst_deps:
if inst in req:
useful_inst.add(inst)
break
else:
# nothing installed satisfied that requirement
return None
# assure *all* that is installed is useful for the requirements
if useful_inst == inst_deps:
satisfying_deps.extend(inst_deps)
else:
return None
# it did it through!
return satisfying_deps
|
[
"def",
"_venv_match",
"(",
"self",
",",
"installed",
",",
"requirements",
")",
":",
"if",
"not",
"requirements",
":",
"# special case for no requirements, where we can't actually",
"# check anything: the venv is useful if nothing installed too",
"return",
"None",
"if",
"installed",
"else",
"[",
"]",
"satisfying_deps",
"=",
"[",
"]",
"for",
"repo",
",",
"req_deps",
"in",
"requirements",
".",
"items",
"(",
")",
":",
"useful_inst",
"=",
"set",
"(",
")",
"if",
"repo",
"not",
"in",
"installed",
":",
"# the venv doesn't even have the repo",
"return",
"None",
"if",
"repo",
"==",
"REPO_VCS",
":",
"inst_deps",
"=",
"{",
"VCSDependency",
"(",
"url",
")",
"for",
"url",
"in",
"installed",
"[",
"repo",
"]",
".",
"keys",
"(",
")",
"}",
"else",
":",
"inst_deps",
"=",
"{",
"Distribution",
"(",
"project_name",
"=",
"dep",
",",
"version",
"=",
"ver",
")",
"for",
"(",
"dep",
",",
"ver",
")",
"in",
"installed",
"[",
"repo",
"]",
".",
"items",
"(",
")",
"}",
"for",
"req",
"in",
"req_deps",
":",
"for",
"inst",
"in",
"inst_deps",
":",
"if",
"inst",
"in",
"req",
":",
"useful_inst",
".",
"add",
"(",
"inst",
")",
"break",
"else",
":",
"# nothing installed satisfied that requirement",
"return",
"None",
"# assure *all* that is installed is useful for the requirements",
"if",
"useful_inst",
"==",
"inst_deps",
":",
"satisfying_deps",
".",
"extend",
"(",
"inst_deps",
")",
"else",
":",
"return",
"None",
"# it did it through!",
"return",
"satisfying_deps"
] |
Return True if what is installed satisfies the requirements.
This method has multiple exit-points, but only for False (because
if *anything* is not satisified, the venv is no good). Only after
all was checked, and it didn't exit, the venv is ok so return True.
|
[
"Return",
"True",
"if",
"what",
"is",
"installed",
"satisfies",
"the",
"requirements",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L44-L84
|
train
|
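The inner loop of `_venv_match` relies on Python's `for ... else` clause: the `else` branch runs only when the loop finishes without hitting `break`. A tiny standalone demonstration of that control flow, with invented data:

```python
def first_match(candidates, wanted):
    for item in candidates:
        if item == wanted:
            print("found", item)
            break
    else:
        # reached only if the loop was never broken out of
        print("nothing matched", wanted)

first_match(["pytest", "requests"], "requests")  # found requests
first_match(["pytest", "requests"], "numpy")     # nothing matched numpy
```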
PyAr/fades
|
fades/cache.py
|
VEnvsCache._match_by_uuid
|
def _match_by_uuid(self, current_venvs, uuid):
"""Select a venv matching exactly by uuid."""
for venv_str in current_venvs:
venv = json.loads(venv_str)
env_path = venv.get('metadata', {}).get('env_path')
_, env_uuid = os.path.split(env_path)
if env_uuid == uuid:
return venv
|
python
|
def _match_by_uuid(self, current_venvs, uuid):
"""Select a venv matching exactly by uuid."""
for venv_str in current_venvs:
venv = json.loads(venv_str)
env_path = venv.get('metadata', {}).get('env_path')
_, env_uuid = os.path.split(env_path)
if env_uuid == uuid:
return venv
|
[
"def",
"_match_by_uuid",
"(",
"self",
",",
"current_venvs",
",",
"uuid",
")",
":",
"for",
"venv_str",
"in",
"current_venvs",
":",
"venv",
"=",
"json",
".",
"loads",
"(",
"venv_str",
")",
"env_path",
"=",
"venv",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'env_path'",
")",
"_",
",",
"env_uuid",
"=",
"os",
".",
"path",
".",
"split",
"(",
"env_path",
")",
"if",
"env_uuid",
"==",
"uuid",
":",
"return",
"venv"
] |
Select a venv matching exactly by uuid.
|
[
"Select",
"a",
"venv",
"matching",
"exactly",
"by",
"uuid",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L86-L93
|
train
|
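A quick illustration of the `os.path.split` trick `_match_by_uuid` uses to treat the last path component as the venv uuid (the path below is a made-up example):

import os

env_path = "/home/user/.local/share/fades/a1b2c3d4-0000-1111-2222-333344445555"
_, env_uuid = os.path.split(env_path)
print(env_uuid)  # a1b2c3d4-0000-1111-2222-333344445555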
PyAr/fades
|
fades/cache.py
|
VEnvsCache._select_better_fit
|
def _select_better_fit(self, matching_venvs):
"""Receive a list of matching venvs, and decide which one is the best fit."""
# keep the venvs in a separate array, to pick up the winner, and the (sorted, to compare
# each dependency with its equivalent) in other structure to later compare
venvs = []
to_compare = []
for matching, venv in matching_venvs:
to_compare.append(sorted(matching, key=lambda req: getattr(req, 'key', '')))
venvs.append(venv)
# compare each n-tuple of dependencies to see which one is bigger, and add score to the
# position of the winner
scores = [0] * len(venvs)
for dependencies in zip(*to_compare):
if not isinstance(dependencies[0], Distribution):
# only distribution URLs can be compared
continue
winner = dependencies.index(max(dependencies))
scores[winner] = scores[winner] + 1
# get the rightmost winner (in case of ties, to select the latest venv)
winner_pos = None
winner_score = -1
for i, score in enumerate(scores):
if score >= winner_score:
winner_score = score
winner_pos = i
return venvs[winner_pos]
|
python
|
def _select_better_fit(self, matching_venvs):
"""Receive a list of matching venvs, and decide which one is the best fit."""
# keep the venvs in a separate array, to pick up the winner, and the (sorted, to compare
# each dependency with its equivalent) in other structure to later compare
venvs = []
to_compare = []
for matching, venv in matching_venvs:
to_compare.append(sorted(matching, key=lambda req: getattr(req, 'key', '')))
venvs.append(venv)
# compare each n-tuple of dependencies to see which one is bigger, and add score to the
# position of the winner
scores = [0] * len(venvs)
for dependencies in zip(*to_compare):
if not isinstance(dependencies[0], Distribution):
# only distribution URLs can be compared
continue
winner = dependencies.index(max(dependencies))
scores[winner] = scores[winner] + 1
# get the rightmost winner (in case of ties, to select the latest venv)
winner_pos = None
winner_score = -1
for i, score in enumerate(scores):
if score >= winner_score:
winner_score = score
winner_pos = i
return venvs[winner_pos]
|
[
"def",
"_select_better_fit",
"(",
"self",
",",
"matching_venvs",
")",
":",
"# keep the venvs in a separate array, to pick up the winner, and the (sorted, to compare",
"# each dependency with its equivalent) in other structure to later compare",
"venvs",
"=",
"[",
"]",
"to_compare",
"=",
"[",
"]",
"for",
"matching",
",",
"venv",
"in",
"matching_venvs",
":",
"to_compare",
".",
"append",
"(",
"sorted",
"(",
"matching",
",",
"key",
"=",
"lambda",
"req",
":",
"getattr",
"(",
"req",
",",
"'key'",
",",
"''",
")",
")",
")",
"venvs",
".",
"append",
"(",
"venv",
")",
"# compare each n-tuple of dependencies to see which one is bigger, and add score to the",
"# position of the winner",
"scores",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"venvs",
")",
"for",
"dependencies",
"in",
"zip",
"(",
"*",
"to_compare",
")",
":",
"if",
"not",
"isinstance",
"(",
"dependencies",
"[",
"0",
"]",
",",
"Distribution",
")",
":",
"# only distribution URLs can be compared",
"continue",
"winner",
"=",
"dependencies",
".",
"index",
"(",
"max",
"(",
"dependencies",
")",
")",
"scores",
"[",
"winner",
"]",
"=",
"scores",
"[",
"winner",
"]",
"+",
"1",
"# get the rightmost winner (in case of ties, to select the latest venv)",
"winner_pos",
"=",
"None",
"winner_score",
"=",
"-",
"1",
"for",
"i",
",",
"score",
"in",
"enumerate",
"(",
"scores",
")",
":",
"if",
"score",
">=",
"winner_score",
":",
"winner_score",
"=",
"score",
"winner_pos",
"=",
"i",
"return",
"venvs",
"[",
"winner_pos",
"]"
] |
Receive a list of matching venvs, and decide which one is the best fit.
|
[
"Receive",
"a",
"list",
"of",
"matching",
"venvs",
"and",
"decide",
"which",
"one",
"is",
"the",
"best",
"fit",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L95-L123
|
train
|
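A toy rendition of the scoring in `_select_better_fit`, using plain version tuples instead of `Distribution` objects to show how the rightmost venv wins ties:

# Each inner list is "what one venv has installed", already sorted by name.
venvs = ["venv-A", "venv-B"]
to_compare = [
    [("docopt", (0, 6, 1)), ("requests", (2, 30, 0))],   # venv-A
    [("docopt", (0, 6, 2)), ("requests", (2, 30, 0))],   # venv-B
]

scores = [0] * len(venvs)
for dependencies in zip(*to_compare):
    winner = dependencies.index(max(dependencies))
    scores[winner] += 1

# rightmost best score wins (ties favour the most recently stored venv)
winner_pos = None
winner_score = -1
for i, score in enumerate(scores):
    if score >= winner_score:
        winner_score = score
        winner_pos = i

print(venvs[winner_pos])  # venv-B (newer docopt, and rightmost on ties)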
PyAr/fades
|
fades/cache.py
|
VEnvsCache._match_by_requirements
|
def _match_by_requirements(self, current_venvs, requirements, interpreter, options):
"""Select a venv matching interpreter and options, complying with requirements.
Several venvs can be found in this case, will return the better fit.
"""
matching_venvs = []
for venv_str in current_venvs:
venv = json.loads(venv_str)
# simple filter, need to have exactly same options and interpreter
if venv.get('options') != options or venv.get('interpreter') != interpreter:
continue
# requirements complying: result can be None (no comply) or a score to later sort
matching = self._venv_match(venv['installed'], requirements)
if matching is not None:
matching_venvs.append((matching, venv))
if not matching_venvs:
return
return self._select_better_fit(matching_venvs)
|
python
|
def _match_by_requirements(self, current_venvs, requirements, interpreter, options):
"""Select a venv matching interpreter and options, complying with requirements.
Several venvs can be found in this case, will return the better fit.
"""
matching_venvs = []
for venv_str in current_venvs:
venv = json.loads(venv_str)
# simple filter, need to have exactly same options and interpreter
if venv.get('options') != options or venv.get('interpreter') != interpreter:
continue
# requirements complying: result can be None (no comply) or a score to later sort
matching = self._venv_match(venv['installed'], requirements)
if matching is not None:
matching_venvs.append((matching, venv))
if not matching_venvs:
return
return self._select_better_fit(matching_venvs)
|
[
"def",
"_match_by_requirements",
"(",
"self",
",",
"current_venvs",
",",
"requirements",
",",
"interpreter",
",",
"options",
")",
":",
"matching_venvs",
"=",
"[",
"]",
"for",
"venv_str",
"in",
"current_venvs",
":",
"venv",
"=",
"json",
".",
"loads",
"(",
"venv_str",
")",
"# simple filter, need to have exactly same options and interpreter",
"if",
"venv",
".",
"get",
"(",
"'options'",
")",
"!=",
"options",
"or",
"venv",
".",
"get",
"(",
"'interpreter'",
")",
"!=",
"interpreter",
":",
"continue",
"# requirements complying: result can be None (no comply) or a score to later sort",
"matching",
"=",
"self",
".",
"_venv_match",
"(",
"venv",
"[",
"'installed'",
"]",
",",
"requirements",
")",
"if",
"matching",
"is",
"not",
"None",
":",
"matching_venvs",
".",
"append",
"(",
"(",
"matching",
",",
"venv",
")",
")",
"if",
"not",
"matching_venvs",
":",
"return",
"return",
"self",
".",
"_select_better_fit",
"(",
"matching_venvs",
")"
] |
Select a venv matching interpreter and options, complying with requirements.
Several venvs can be found in this case, will return the better fit.
|
[
"Select",
"a",
"venv",
"matching",
"interpreter",
"and",
"options",
"complying",
"with",
"requirements",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L125-L146
|
train
|
PyAr/fades
|
fades/cache.py
|
VEnvsCache._select
|
def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None):
"""Select which venv satisfy the received requirements."""
if uuid:
logger.debug("Searching a venv by uuid: %s", uuid)
venv = self._match_by_uuid(current_venvs, uuid)
else:
logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s",
requirements, interpreter, options)
venv = self._match_by_requirements(current_venvs, requirements, interpreter, options)
if venv is None:
logger.debug("No matching venv found :(")
return
logger.debug("Found a matching venv! %s", venv)
return venv['metadata']
|
python
|
def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None):
"""Select which venv satisfy the received requirements."""
if uuid:
logger.debug("Searching a venv by uuid: %s", uuid)
venv = self._match_by_uuid(current_venvs, uuid)
else:
logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s",
requirements, interpreter, options)
venv = self._match_by_requirements(current_venvs, requirements, interpreter, options)
if venv is None:
logger.debug("No matching venv found :(")
return
logger.debug("Found a matching venv! %s", venv)
return venv['metadata']
|
[
"def",
"_select",
"(",
"self",
",",
"current_venvs",
",",
"requirements",
"=",
"None",
",",
"interpreter",
"=",
"''",
",",
"uuid",
"=",
"''",
",",
"options",
"=",
"None",
")",
":",
"if",
"uuid",
":",
"logger",
".",
"debug",
"(",
"\"Searching a venv by uuid: %s\"",
",",
"uuid",
")",
"venv",
"=",
"self",
".",
"_match_by_uuid",
"(",
"current_venvs",
",",
"uuid",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Searching a venv for: reqs=%s interpreter=%s options=%s\"",
",",
"requirements",
",",
"interpreter",
",",
"options",
")",
"venv",
"=",
"self",
".",
"_match_by_requirements",
"(",
"current_venvs",
",",
"requirements",
",",
"interpreter",
",",
"options",
")",
"if",
"venv",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"No matching venv found :(\"",
")",
"return",
"logger",
".",
"debug",
"(",
"\"Found a matching venv! %s\"",
",",
"venv",
")",
"return",
"venv",
"[",
"'metadata'",
"]"
] |
Select which venv satisfy the received requirements.
|
[
"Select",
"which",
"venv",
"satisfy",
"the",
"received",
"requirements",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L148-L163
|
train
|
PyAr/fades
|
fades/cache.py
|
VEnvsCache.get_venv
|
def get_venv(self, requirements=None, interpreter='', uuid='', options=None):
"""Find a venv that serves these requirements, if any."""
lines = self._read_cache()
return self._select(lines, requirements, interpreter, uuid=uuid, options=options)
|
python
|
def get_venv(self, requirements=None, interpreter='', uuid='', options=None):
"""Find a venv that serves these requirements, if any."""
lines = self._read_cache()
return self._select(lines, requirements, interpreter, uuid=uuid, options=options)
|
[
"def",
"get_venv",
"(",
"self",
",",
"requirements",
"=",
"None",
",",
"interpreter",
"=",
"''",
",",
"uuid",
"=",
"''",
",",
"options",
"=",
"None",
")",
":",
"lines",
"=",
"self",
".",
"_read_cache",
"(",
")",
"return",
"self",
".",
"_select",
"(",
"lines",
",",
"requirements",
",",
"interpreter",
",",
"uuid",
"=",
"uuid",
",",
"options",
"=",
"options",
")"
] |
Find a venv that serves these requirements, if any.
|
[
"Find",
"a",
"venv",
"that",
"serves",
"these",
"requirements",
"if",
"any",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L165-L168
|
train
|
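A hedged usage sketch for `get_venv`; the constructor argument and the exact shape of the requirements/options mappings are assumptions, since these rows only show the methods:

# Assumed: VEnvsCache takes the path of its JSON-lines index file.
from fades.cache import VEnvsCache

cache = VEnvsCache("/tmp/fades-venvs.idx")          # hypothetical path
metadata = cache.get_venv(requirements={}, interpreter="python3", options={})
if metadata is None:
    print("no cached venv fits, a new one would be created")
else:
    print("reuse venv at", metadata.get("env_path"))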
PyAr/fades
|
fades/cache.py
|
VEnvsCache.store
|
def store(self, installed_stuff, metadata, interpreter, options):
"""Store the virtualenv metadata for the indicated installed_stuff."""
new_content = {
'timestamp': int(time.mktime(time.localtime())),
'installed': installed_stuff,
'metadata': metadata,
'interpreter': interpreter,
'options': options
}
logger.debug("Storing installed=%s metadata=%s interpreter=%s options=%s",
installed_stuff, metadata, interpreter, options)
with filelock(self.lockpath):
self._write_cache([json.dumps(new_content)], append=True)
|
python
|
def store(self, installed_stuff, metadata, interpreter, options):
"""Store the virtualenv metadata for the indicated installed_stuff."""
new_content = {
'timestamp': int(time.mktime(time.localtime())),
'installed': installed_stuff,
'metadata': metadata,
'interpreter': interpreter,
'options': options
}
logger.debug("Storing installed=%s metadata=%s interpreter=%s options=%s",
installed_stuff, metadata, interpreter, options)
with filelock(self.lockpath):
self._write_cache([json.dumps(new_content)], append=True)
|
[
"def",
"store",
"(",
"self",
",",
"installed_stuff",
",",
"metadata",
",",
"interpreter",
",",
"options",
")",
":",
"new_content",
"=",
"{",
"'timestamp'",
":",
"int",
"(",
"time",
".",
"mktime",
"(",
"time",
".",
"localtime",
"(",
")",
")",
")",
",",
"'installed'",
":",
"installed_stuff",
",",
"'metadata'",
":",
"metadata",
",",
"'interpreter'",
":",
"interpreter",
",",
"'options'",
":",
"options",
"}",
"logger",
".",
"debug",
"(",
"\"Storing installed=%s metadata=%s interpreter=%s options=%s\"",
",",
"installed_stuff",
",",
"metadata",
",",
"interpreter",
",",
"options",
")",
"with",
"filelock",
"(",
"self",
".",
"lockpath",
")",
":",
"self",
".",
"_write_cache",
"(",
"[",
"json",
".",
"dumps",
"(",
"new_content",
")",
"]",
",",
"append",
"=",
"True",
")"
] |
Store the virtualenv metadata for the indicated installed_stuff.
|
[
"Store",
"the",
"virtualenv",
"metadata",
"for",
"the",
"indicated",
"installed_stuff",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L175-L187
|
train
|
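What one appended index line looks like after `store` runs, rebuilt standalone (the payload values, including the 'pypi' repo key, are made up for illustration):

import json
import time

new_content = {
    'timestamp': int(time.mktime(time.localtime())),
    'installed': {'pypi': {'requests': '2.31.0'}},       # example payload
    'metadata': {'env_path': '/tmp/venvs/abc', 'env_bin_path': '/tmp/venvs/abc/bin'},
    'interpreter': 'python3',
    'options': {'virtualenv_options': [], 'pyvenv_options': []},
}
print(json.dumps(new_content))   # this single line is what gets appended to the cache file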
PyAr/fades
|
fades/cache.py
|
VEnvsCache.remove
|
def remove(self, env_path):
"""Remove metadata for a given virtualenv from cache."""
with filelock(self.lockpath):
cache = self._read_cache()
logger.debug("Removing virtualenv from cache: %s" % env_path)
lines = [
line for line in cache
if json.loads(line).get('metadata', {}).get('env_path') != env_path
]
self._write_cache(lines)
|
python
|
def remove(self, env_path):
"""Remove metadata for a given virtualenv from cache."""
with filelock(self.lockpath):
cache = self._read_cache()
logger.debug("Removing virtualenv from cache: %s" % env_path)
lines = [
line for line in cache
if json.loads(line).get('metadata', {}).get('env_path') != env_path
]
self._write_cache(lines)
|
[
"def",
"remove",
"(",
"self",
",",
"env_path",
")",
":",
"with",
"filelock",
"(",
"self",
".",
"lockpath",
")",
":",
"cache",
"=",
"self",
".",
"_read_cache",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Removing virtualenv from cache: %s\"",
"%",
"env_path",
")",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"cache",
"if",
"json",
".",
"loads",
"(",
"line",
")",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'env_path'",
")",
"!=",
"env_path",
"]",
"self",
".",
"_write_cache",
"(",
"lines",
")"
] |
Remove metadata for a given virtualenv from cache.
|
[
"Remove",
"metadata",
"for",
"a",
"given",
"virtualenv",
"from",
"cache",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L189-L198
|
train
|
PyAr/fades
|
fades/cache.py
|
VEnvsCache._read_cache
|
def _read_cache(self):
"""Read virtualenv metadata from cache."""
if os.path.exists(self.filepath):
with open(self.filepath, 'rt', encoding='utf8') as fh:
lines = [x.strip() for x in fh]
else:
logger.debug("Index not found, starting empty")
lines = []
return lines
|
python
|
def _read_cache(self):
"""Read virtualenv metadata from cache."""
if os.path.exists(self.filepath):
with open(self.filepath, 'rt', encoding='utf8') as fh:
lines = [x.strip() for x in fh]
else:
logger.debug("Index not found, starting empty")
lines = []
return lines
|
[
"def",
"_read_cache",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"filepath",
")",
":",
"with",
"open",
"(",
"self",
".",
"filepath",
",",
"'rt'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"fh",
":",
"lines",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"fh",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Index not found, starting empty\"",
")",
"lines",
"=",
"[",
"]",
"return",
"lines"
] |
Read virtualenv metadata from cache.
|
[
"Read",
"virtualenv",
"metadata",
"from",
"cache",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L200-L208
|
train
|
PyAr/fades
|
fades/cache.py
|
VEnvsCache._write_cache
|
def _write_cache(self, lines, append=False):
"""Write virtualenv metadata to cache."""
mode = 'at' if append else 'wt'
with open(self.filepath, mode, encoding='utf8') as fh:
fh.writelines(line + '\n' for line in lines)
|
python
|
def _write_cache(self, lines, append=False):
"""Write virtualenv metadata to cache."""
mode = 'at' if append else 'wt'
with open(self.filepath, mode, encoding='utf8') as fh:
fh.writelines(line + '\n' for line in lines)
|
[
"def",
"_write_cache",
"(",
"self",
",",
"lines",
",",
"append",
"=",
"False",
")",
":",
"mode",
"=",
"'at'",
"if",
"append",
"else",
"'wt'",
"with",
"open",
"(",
"self",
".",
"filepath",
",",
"mode",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"fh",
":",
"fh",
".",
"writelines",
"(",
"line",
"+",
"'\\n'",
"for",
"line",
"in",
"lines",
")"
] |
Write virtualenv metadata to cache.
|
[
"Write",
"virtualenv",
"metadata",
"to",
"cache",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/cache.py#L210-L214
|
train
|
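The `fh.writelines(line + '\n' for line in lines)` idiom in `_write_cache`, shown on its own with an in-memory buffer standing in for the cache file:

import io

lines = ['{"a": 1}', '{"b": 2}']
fh = io.StringIO()
# writelines does not add newlines itself, hence the generator expression
fh.writelines(line + '\n' for line in lines)
print(fh.getvalue(), end='')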
PyAr/fades
|
fades/pipmanager.py
|
PipManager.install
|
def install(self, dependency):
"""Install a new dependency."""
if not self.pip_installed:
logger.info("Need to install a dependency with pip, but no builtin, "
"doing it manually (just wait a little, all should go well)")
self._brute_force_install_pip()
# split to pass several tokens on multiword dependency (this is very specific for '-e' on
# external requirements, but implemented generically; note that this does not apply for
# normal reqs, because even if it originally is 'foo > 1.2', after parsing it loses the
# internal spaces)
str_dep = str(dependency)
args = [self.pip_exe, "install"] + str_dep.split()
if self.options:
for option in self.options:
args.extend(option.split())
logger.info("Installing dependency: %r", str_dep)
try:
helpers.logged_exec(args)
except helpers.ExecutionError as error:
error.dump_to_log(logger)
raise error
except Exception as error:
logger.exception("Error installing %s: %s", str_dep, error)
raise error
|
python
|
def install(self, dependency):
"""Install a new dependency."""
if not self.pip_installed:
logger.info("Need to install a dependency with pip, but no builtin, "
"doing it manually (just wait a little, all should go well)")
self._brute_force_install_pip()
# split to pass several tokens on multiword dependency (this is very specific for '-e' on
# external requirements, but implemented generically; note that this does not apply for
# normal reqs, because even if it originally is 'foo > 1.2', after parsing it loses the
# internal spaces)
str_dep = str(dependency)
args = [self.pip_exe, "install"] + str_dep.split()
if self.options:
for option in self.options:
args.extend(option.split())
logger.info("Installing dependency: %r", str_dep)
try:
helpers.logged_exec(args)
except helpers.ExecutionError as error:
error.dump_to_log(logger)
raise error
except Exception as error:
logger.exception("Error installing %s: %s", str_dep, error)
raise error
|
[
"def",
"install",
"(",
"self",
",",
"dependency",
")",
":",
"if",
"not",
"self",
".",
"pip_installed",
":",
"logger",
".",
"info",
"(",
"\"Need to install a dependency with pip, but no builtin, \"",
"\"doing it manually (just wait a little, all should go well)\"",
")",
"self",
".",
"_brute_force_install_pip",
"(",
")",
"# split to pass several tokens on multiword dependency (this is very specific for '-e' on",
"# external requirements, but implemented generically; note that this does not apply for",
"# normal reqs, because even if it originally is 'foo > 1.2', after parsing it loses the",
"# internal spaces)",
"str_dep",
"=",
"str",
"(",
"dependency",
")",
"args",
"=",
"[",
"self",
".",
"pip_exe",
",",
"\"install\"",
"]",
"+",
"str_dep",
".",
"split",
"(",
")",
"if",
"self",
".",
"options",
":",
"for",
"option",
"in",
"self",
".",
"options",
":",
"args",
".",
"extend",
"(",
"option",
".",
"split",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Installing dependency: %r\"",
",",
"str_dep",
")",
"try",
":",
"helpers",
".",
"logged_exec",
"(",
"args",
")",
"except",
"helpers",
".",
"ExecutionError",
"as",
"error",
":",
"error",
".",
"dump_to_log",
"(",
"logger",
")",
"raise",
"error",
"except",
"Exception",
"as",
"error",
":",
"logger",
".",
"exception",
"(",
"\"Error installing %s: %s\"",
",",
"str_dep",
",",
"error",
")",
"raise",
"error"
] |
Install a new dependency.
|
[
"Install",
"a",
"new",
"dependency",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/pipmanager.py#L50-L75
|
train
|
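How the pip command line gets assembled in `install`, extracted into a standalone snippet (pip_exe and the option string are placeholder values):

pip_exe = "/tmp/venv/bin/pip"                                  # placeholder path
dependency = "-e git+https://example.com/proj.git#egg=proj"    # multiword '-e' case
options = ["--index-url http://pypi.example.com/simple"]       # hypothetical option

args = [pip_exe, "install"] + str(dependency).split()
for option in options:
    args.extend(option.split())

print(args)
# ['/tmp/venv/bin/pip', 'install', '-e', 'git+https://example.com/proj.git#egg=proj',
#  '--index-url', 'http://pypi.example.com/simple']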
PyAr/fades
|
fades/pipmanager.py
|
PipManager.get_version
|
def get_version(self, dependency):
"""Return the installed version parsing the output of 'pip show'."""
logger.debug("getting installed version for %s", dependency)
stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
version = [line for line in stdout if line.startswith('Version:')]
if len(version) == 1:
version = version[0].strip().split()[1]
logger.debug("Installed version of %s is: %s", dependency, version)
return version
else:
logger.error('Fades is having problems getting the installed version. '
'Run with -v or check the logs for details')
return ''
|
python
|
def get_version(self, dependency):
"""Return the installed version parsing the output of 'pip show'."""
logger.debug("getting installed version for %s", dependency)
stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
version = [line for line in stdout if line.startswith('Version:')]
if len(version) == 1:
version = version[0].strip().split()[1]
logger.debug("Installed version of %s is: %s", dependency, version)
return version
else:
logger.error('Fades is having problems getting the installed version. '
'Run with -v or check the logs for details')
return ''
|
[
"def",
"get_version",
"(",
"self",
",",
"dependency",
")",
":",
"logger",
".",
"debug",
"(",
"\"getting installed version for %s\"",
",",
"dependency",
")",
"stdout",
"=",
"helpers",
".",
"logged_exec",
"(",
"[",
"self",
".",
"pip_exe",
",",
"\"show\"",
",",
"str",
"(",
"dependency",
")",
"]",
")",
"version",
"=",
"[",
"line",
"for",
"line",
"in",
"stdout",
"if",
"line",
".",
"startswith",
"(",
"'Version:'",
")",
"]",
"if",
"len",
"(",
"version",
")",
"==",
"1",
":",
"version",
"=",
"version",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"1",
"]",
"logger",
".",
"debug",
"(",
"\"Installed version of %s is: %s\"",
",",
"dependency",
",",
"version",
")",
"return",
"version",
"else",
":",
"logger",
".",
"error",
"(",
"'Fades is having problems getting the installed version. '",
"'Run with -v or check the logs for details'",
")",
"return",
"''"
] |
Return the installed version parsing the output of 'pip show'.
|
[
"Return",
"the",
"installed",
"version",
"parsing",
"the",
"output",
"of",
"pip",
"show",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/pipmanager.py#L77-L89
|
train
|
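The parsing step of `get_version` on canned `pip show` output (the output lines here are typed in by hand, not captured from a real run):

stdout = [
    "Name: requests",
    "Version: 2.31.0",
    "Summary: Python HTTP for Humans.",
]

version = [line for line in stdout if line.startswith('Version:')]
if len(version) == 1:
    version = version[0].strip().split()[1]
print(version)   # 2.31.0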
PyAr/fades
|
fades/pipmanager.py
|
PipManager._brute_force_install_pip
|
def _brute_force_install_pip(self):
"""A brute force install of pip itself."""
if os.path.exists(self.pip_installer_fname):
logger.debug("Using pip installer from %r", self.pip_installer_fname)
else:
logger.debug(
"Installer for pip not found in %r, downloading it", self.pip_installer_fname)
self._download_pip_installer()
logger.debug("Installing PIP manually in the virtualenv")
python_exe = os.path.join(self.env_bin_path, "python")
helpers.logged_exec([python_exe, self.pip_installer_fname, '-I'])
self.pip_installed = True
|
python
|
def _brute_force_install_pip(self):
"""A brute force install of pip itself."""
if os.path.exists(self.pip_installer_fname):
logger.debug("Using pip installer from %r", self.pip_installer_fname)
else:
logger.debug(
"Installer for pip not found in %r, downloading it", self.pip_installer_fname)
self._download_pip_installer()
logger.debug("Installing PIP manually in the virtualenv")
python_exe = os.path.join(self.env_bin_path, "python")
helpers.logged_exec([python_exe, self.pip_installer_fname, '-I'])
self.pip_installed = True
|
[
"def",
"_brute_force_install_pip",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"pip_installer_fname",
")",
":",
"logger",
".",
"debug",
"(",
"\"Using pip installer from %r\"",
",",
"self",
".",
"pip_installer_fname",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Installer for pip not found in %r, downloading it\"",
",",
"self",
".",
"pip_installer_fname",
")",
"self",
".",
"_download_pip_installer",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Installing PIP manually in the virtualenv\"",
")",
"python_exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"env_bin_path",
",",
"\"python\"",
")",
"helpers",
".",
"logged_exec",
"(",
"[",
"python_exe",
",",
"self",
".",
"pip_installer_fname",
",",
"'-I'",
"]",
")",
"self",
".",
"pip_installed",
"=",
"True"
] |
A brute force install of pip itself.
|
[
"A",
"brute",
"force",
"install",
"of",
"pip",
"itself",
"."
] |
e5ea457b09b105f321d4f81772f25e8695159604
|
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/pipmanager.py#L98-L110
|
train
|
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert._generate_configs_from_default
|
def _generate_configs_from_default(self, overrides=None):
# type: (Dict[str, int]) -> Dict[str, int]
""" Generate configs by inheriting from defaults """
config = DEFAULT_CONFIG.copy()
if not overrides:
overrides = {}
for k, v in overrides.items():
config[k] = v
return config
|
python
|
def _generate_configs_from_default(self, overrides=None):
# type: (Dict[str, int]) -> Dict[str, int]
""" Generate configs by inheriting from defaults """
config = DEFAULT_CONFIG.copy()
if not overrides:
overrides = {}
for k, v in overrides.items():
config[k] = v
return config
|
[
"def",
"_generate_configs_from_default",
"(",
"self",
",",
"overrides",
"=",
"None",
")",
":",
"# type: (Dict[str, int]) -> Dict[str, int]",
"config",
"=",
"DEFAULT_CONFIG",
".",
"copy",
"(",
")",
"if",
"not",
"overrides",
":",
"overrides",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"overrides",
".",
"items",
"(",
")",
":",
"config",
"[",
"k",
"]",
"=",
"v",
"return",
"config"
] |
Generate configs by inheriting from defaults
|
[
"Generate",
"configs",
"by",
"inheriting",
"from",
"defaults"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L29-L37
|
train
|
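The default-plus-overrides merge in `_generate_configs_from_default`, with a stand-in DEFAULT_CONFIG (the real keys appear in the later csv methods, e.g. HEADER_COLUMNS_TO_SKIP and CSV_NAME):

DEFAULT_CONFIG = {'HEADER_COLUMNS_TO_SKIP': 0, 'CSV_NAME': 0}   # stand-in defaults

def generate_configs_from_default(overrides=None):
    config = DEFAULT_CONFIG.copy()
    for k, v in (overrides or {}).items():
        config[k] = v
    return config

print(generate_configs_from_default({'HEADER_COLUMNS_TO_SKIP': 1}))
# {'HEADER_COLUMNS_TO_SKIP': 1, 'CSV_NAME': 0}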
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert.read_ical
|
def read_ical(self, ical_file_location): # type: (str) -> Calendar
""" Read the ical file """
with open(ical_file_location, 'r') as ical_file:
data = ical_file.read()
self.cal = Calendar.from_ical(data)
return self.cal
|
python
|
def read_ical(self, ical_file_location): # type: (str) -> Calendar
""" Read the ical file """
with open(ical_file_location, 'r') as ical_file:
data = ical_file.read()
self.cal = Calendar.from_ical(data)
return self.cal
|
[
"def",
"read_ical",
"(",
"self",
",",
"ical_file_location",
")",
":",
"# type: (str) -> Calendar",
"with",
"open",
"(",
"ical_file_location",
",",
"'r'",
")",
"as",
"ical_file",
":",
"data",
"=",
"ical_file",
".",
"read",
"(",
")",
"self",
".",
"cal",
"=",
"Calendar",
".",
"from_ical",
"(",
"data",
")",
"return",
"self",
".",
"cal"
] |
Read the ical file
|
[
"Read",
"the",
"ical",
"file"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L39-L44
|
train
|
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert.read_csv
|
def read_csv(self, csv_location, csv_configs=None):
# type: (str, Dict[str, int]) -> List[List[str]]
""" Read the csv file """
csv_configs = self._generate_configs_from_default(csv_configs)
with open(csv_location, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
self.csv_data = list(csv_reader)
self.csv_data = self.csv_data[csv_configs['HEADER_COLUMNS_TO_SKIP']:]
return self.csv_data
|
python
|
def read_csv(self, csv_location, csv_configs=None):
# type: (str, Dict[str, int]) -> List[List[str]]
""" Read the csv file """
csv_configs = self._generate_configs_from_default(csv_configs)
with open(csv_location, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
self.csv_data = list(csv_reader)
self.csv_data = self.csv_data[csv_configs['HEADER_COLUMNS_TO_SKIP']:]
return self.csv_data
|
[
"def",
"read_csv",
"(",
"self",
",",
"csv_location",
",",
"csv_configs",
"=",
"None",
")",
":",
"# type: (str, Dict[str, int]) -> List[List[str]]",
"csv_configs",
"=",
"self",
".",
"_generate_configs_from_default",
"(",
"csv_configs",
")",
"with",
"open",
"(",
"csv_location",
",",
"'r'",
")",
"as",
"csv_file",
":",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"csv_file",
")",
"self",
".",
"csv_data",
"=",
"list",
"(",
"csv_reader",
")",
"self",
".",
"csv_data",
"=",
"self",
".",
"csv_data",
"[",
"csv_configs",
"[",
"'HEADER_COLUMNS_TO_SKIP'",
"]",
":",
"]",
"return",
"self",
".",
"csv_data"
] |
Read the csv file
|
[
"Read",
"the",
"csv",
"file"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L46-L54
|
train
|
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert.make_ical
|
def make_ical(self, csv_configs=None):
# type: (Dict[str, int]) -> Calendar
""" Make iCal entries """
csv_configs = self._generate_configs_from_default(csv_configs)
self.cal = Calendar()
for row in self.csv_data:
event = Event()
event.add('summary', row[csv_configs['CSV_NAME']])
event.add('dtstart', row[csv_configs['CSV_START_DATE']])
event.add('dtend', row[csv_configs['CSV_END_DATE']])
event.add('description', row[csv_configs['CSV_DESCRIPTION']])
event.add('location', row[csv_configs['CSV_LOCATION']])
self.cal.add_component(event)
return self.cal
|
python
|
def make_ical(self, csv_configs=None):
# type: (Dict[str, int]) -> Calendar
""" Make iCal entries """
csv_configs = self._generate_configs_from_default(csv_configs)
self.cal = Calendar()
for row in self.csv_data:
event = Event()
event.add('summary', row[csv_configs['CSV_NAME']])
event.add('dtstart', row[csv_configs['CSV_START_DATE']])
event.add('dtend', row[csv_configs['CSV_END_DATE']])
event.add('description', row[csv_configs['CSV_DESCRIPTION']])
event.add('location', row[csv_configs['CSV_LOCATION']])
self.cal.add_component(event)
return self.cal
|
[
"def",
"make_ical",
"(",
"self",
",",
"csv_configs",
"=",
"None",
")",
":",
"# type: (Dict[str, int]) -> Calendar",
"csv_configs",
"=",
"self",
".",
"_generate_configs_from_default",
"(",
"csv_configs",
")",
"self",
".",
"cal",
"=",
"Calendar",
"(",
")",
"for",
"row",
"in",
"self",
".",
"csv_data",
":",
"event",
"=",
"Event",
"(",
")",
"event",
".",
"add",
"(",
"'summary'",
",",
"row",
"[",
"csv_configs",
"[",
"'CSV_NAME'",
"]",
"]",
")",
"event",
".",
"add",
"(",
"'dtstart'",
",",
"row",
"[",
"csv_configs",
"[",
"'CSV_START_DATE'",
"]",
"]",
")",
"event",
".",
"add",
"(",
"'dtend'",
",",
"row",
"[",
"csv_configs",
"[",
"'CSV_END_DATE'",
"]",
"]",
")",
"event",
".",
"add",
"(",
"'description'",
",",
"row",
"[",
"csv_configs",
"[",
"'CSV_DESCRIPTION'",
"]",
"]",
")",
"event",
".",
"add",
"(",
"'location'",
",",
"row",
"[",
"csv_configs",
"[",
"'CSV_LOCATION'",
"]",
"]",
")",
"self",
".",
"cal",
".",
"add_component",
"(",
"event",
")",
"return",
"self",
".",
"cal"
] |
Make iCal entries
|
[
"Make",
"iCal",
"entries"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L56-L69
|
train
|
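A hedged end-to-end sketch for the Convert class shown in these rows; the import path, the no-argument constructor and the CSV column layout are all assumptions, but the config keys are the ones used by read_csv and make_ical above:

from csv_ical.convert import Convert   # assumed import path, per the path column

convert = Convert()                     # assumed no-arg constructor
configs = {
    'HEADER_COLUMNS_TO_SKIP': 1,        # hypothetical: skip one header row
    'CSV_NAME': 0, 'CSV_START_DATE': 1, 'CSV_END_DATE': 2,
    'CSV_DESCRIPTION': 3, 'CSV_LOCATION': 4,
}
convert.read_csv('events.csv', csv_configs=configs)
convert.make_ical(csv_configs=configs)
convert.save_ical('events.ics')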
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert.save_ical
|
def save_ical(self, ical_location): # type: (str) -> None
""" Save the calendar instance to a file """
data = self.cal.to_ical()
with open(ical_location, 'w') as ical_file:
ical_file.write(data.decode('utf-8'))
|
python
|
def save_ical(self, ical_location): # type: (str) -> None
""" Save the calendar instance to a file """
data = self.cal.to_ical()
with open(ical_location, 'w') as ical_file:
ical_file.write(data.decode('utf-8'))
|
[
"def",
"save_ical",
"(",
"self",
",",
"ical_location",
")",
":",
"# type: (str) -> None",
"data",
"=",
"self",
".",
"cal",
".",
"to_ical",
"(",
")",
"with",
"open",
"(",
"ical_location",
",",
"'w'",
")",
"as",
"ical_file",
":",
"ical_file",
".",
"write",
"(",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
")"
] |
Save the calendar instance to a file
|
[
"Save",
"the",
"calendar",
"instance",
"to",
"a",
"file"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L86-L90
|
train
|
albertyw/csv-ical
|
csv_ical/convert.py
|
Convert.save_csv
|
def save_csv(self, csv_location): # type: (str) -> None
""" Save the csv to a file """
with open(csv_location, 'w') as csv_handle:
writer = csv.writer(csv_handle)
for row in self.csv_data:
writer.writerow(row)
|
python
|
def save_csv(self, csv_location): # type: (str) -> None
""" Save the csv to a file """
with open(csv_location, 'w') as csv_handle:
writer = csv.writer(csv_handle)
for row in self.csv_data:
writer.writerow(row)
|
[
"def",
"save_csv",
"(",
"self",
",",
"csv_location",
")",
":",
"# type: (str) -> None",
"with",
"open",
"(",
"csv_location",
",",
"'w'",
")",
"as",
"csv_handle",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"csv_handle",
")",
"for",
"row",
"in",
"self",
".",
"csv_data",
":",
"writer",
".",
"writerow",
"(",
"row",
")"
] |
Save the csv to a file
|
[
"Save",
"the",
"csv",
"to",
"a",
"file"
] |
cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed
|
https://github.com/albertyw/csv-ical/blob/cdb55a226cd0cb6cc214d896a6cea41a5b92c9ed/csv_ical/convert.py#L92-L97
|
train
|
planetarypy/planetaryimage
|
planetaryimage/image.py
|
PlanetaryImage.open
|
def open(cls, filename):
""" Read an image file from disk
Parameters
----------
filename : string
Name of file to read as an image file. This file may be gzip
(``.gz``) or bzip2 (``.bz2``) compressed.
"""
if filename.endswith('.gz'):
fp = gzip.open(filename, 'rb')
try:
return cls(fp, filename, compression='gz')
finally:
fp.close()
elif filename.endswith('.bz2'):
fp = bz2.BZ2File(filename, 'rb')
try:
return cls(fp, filename, compression='bz2')
finally:
fp.close()
else:
with open(filename, 'rb') as fp:
return cls(fp, filename)
|
python
|
def open(cls, filename):
""" Read an image file from disk
Parameters
----------
filename : string
Name of file to read as an image file. This file may be gzip
(``.gz``) or bzip2 (``.bz2``) compressed.
"""
if filename.endswith('.gz'):
fp = gzip.open(filename, 'rb')
try:
return cls(fp, filename, compression='gz')
finally:
fp.close()
elif filename.endswith('.bz2'):
fp = bz2.BZ2File(filename, 'rb')
try:
return cls(fp, filename, compression='bz2')
finally:
fp.close()
else:
with open(filename, 'rb') as fp:
return cls(fp, filename)
|
[
"def",
"open",
"(",
"cls",
",",
"filename",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"fp",
"=",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"try",
":",
"return",
"cls",
"(",
"fp",
",",
"filename",
",",
"compression",
"=",
"'gz'",
")",
"finally",
":",
"fp",
".",
"close",
"(",
")",
"elif",
"filename",
".",
"endswith",
"(",
"'.bz2'",
")",
":",
"fp",
"=",
"bz2",
".",
"BZ2File",
"(",
"filename",
",",
"'rb'",
")",
"try",
":",
"return",
"cls",
"(",
"fp",
",",
"filename",
",",
"compression",
"=",
"'bz2'",
")",
"finally",
":",
"fp",
".",
"close",
"(",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"fp",
":",
"return",
"cls",
"(",
"fp",
",",
"filename",
")"
] |
Read an image file from disk
Parameters
----------
filename : string
Name of file to read as an image file. This file may be gzip
(``.gz``) or bzip2 (``.bz2``) compressed.
|
[
"Read",
"an",
"image",
"file",
"from",
"disk"
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/image.py#L69-L92
|
train
|
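A short usage sketch for the classmethod above; the filename is a placeholder and it assumes PDS3Image (shown in the later rows) inherits `open` from PlanetaryImage:

from planetaryimage.pds3image import PDS3Image   # path per the later rows

image = PDS3Image.open('frame.IMG')     # plain file; '.gz' / '.bz2' also accepted
print(image.data.shape)                 # pixel array, per the other rows' use of self.data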
planetarypy/planetaryimage
|
planetaryimage/image.py
|
PlanetaryImage.image
|
def image(self):
"""An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations.
"""
if self.bands == 1:
return self.data.squeeze()
elif self.bands == 3:
return numpy.dstack(self.data)
|
python
|
def image(self):
"""An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations.
"""
if self.bands == 1:
return self.data.squeeze()
elif self.bands == 3:
return numpy.dstack(self.data)
|
[
"def",
"image",
"(",
"self",
")",
":",
"if",
"self",
".",
"bands",
"==",
"1",
":",
"return",
"self",
".",
"data",
".",
"squeeze",
"(",
")",
"elif",
"self",
".",
"bands",
"==",
"3",
":",
"return",
"numpy",
".",
"dstack",
"(",
"self",
".",
"data",
")"
] |
An Image like array of ``self.data`` convenient for image processing tasks
* 2D array for single band, grayscale image data
* 3D array for three band, RGB image data
Enables working with ``self.data`` as if it were a PIL image.
See https://planetaryimage.readthedocs.io/en/latest/usage.html to see
how to open images to view them and make manipulations.
|
[
"An",
"Image",
"like",
"array",
"of",
"self",
".",
"data",
"convenient",
"for",
"image",
"processing",
"tasks"
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/image.py#L131-L146
|
train
|
planetarypy/planetaryimage
|
planetaryimage/cubefile.py
|
CubeFile.apply_numpy_specials
|
def apply_numpy_specials(self, copy=True):
"""Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
"""
if copy:
data = self.data.astype(numpy.float64)
elif self.data.dtype != numpy.float64:
data = self.data = self.data.astype(numpy.float64)
else:
data = self.data
data[data == self.specials['Null']] = numpy.nan
data[data < self.specials['Min']] = numpy.NINF
data[data > self.specials['Max']] = numpy.inf
return data
|
python
|
def apply_numpy_specials(self, copy=True):
"""Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
"""
if copy:
data = self.data.astype(numpy.float64)
elif self.data.dtype != numpy.float64:
data = self.data = self.data.astype(numpy.float64)
else:
data = self.data
data[data == self.specials['Null']] = numpy.nan
data[data < self.specials['Min']] = numpy.NINF
data[data > self.specials['Max']] = numpy.inf
return data
|
[
"def",
"apply_numpy_specials",
"(",
"self",
",",
"copy",
"=",
"True",
")",
":",
"if",
"copy",
":",
"data",
"=",
"self",
".",
"data",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
"elif",
"self",
".",
"data",
".",
"dtype",
"!=",
"numpy",
".",
"float64",
":",
"data",
"=",
"self",
".",
"data",
"=",
"self",
".",
"data",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
"else",
":",
"data",
"=",
"self",
".",
"data",
"data",
"[",
"data",
"==",
"self",
".",
"specials",
"[",
"'Null'",
"]",
"]",
"=",
"numpy",
".",
"nan",
"data",
"[",
"data",
"<",
"self",
".",
"specials",
"[",
"'Min'",
"]",
"]",
"=",
"numpy",
".",
"NINF",
"data",
"[",
"data",
">",
"self",
".",
"specials",
"[",
"'Max'",
"]",
"]",
"=",
"numpy",
".",
"inf",
"return",
"data"
] |
Convert isis special pixel values to numpy special pixel values.
======= =======
Isis Numpy
======= =======
Null nan
Lrs -inf
Lis -inf
His inf
Hrs inf
======= =======
Parameters
----------
copy : bool [True]
Whether to apply the new special values to a copy of the
pixel data and leave the original unaffected
Returns
-------
Numpy Array
A numpy array with special values converted to numpy's nan, inf,
and -inf
|
[
"Convert",
"isis",
"special",
"pixel",
"values",
"to",
"numpy",
"special",
"pixel",
"values",
"."
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/cubefile.py#L161-L199
|
train
|
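The special-value masking used in `apply_numpy_specials`, reproduced with made-up sentinel values so it runs standalone; note the Null check comes first, so Null pixels become nan rather than -inf:

import numpy

# Hypothetical sentinels standing in for self.specials
specials = {'Null': -32768, 'Min': -32752, 'Max': 32767}

data = numpy.array([-32768, -32760, 10, 20, 40000], dtype=numpy.float64)
data[data == specials['Null']] = numpy.nan
data[data < specials['Min']] = -numpy.inf    # -numpy.inf instead of the older numpy.NINF alias
data[data > specials['Max']] = numpy.inf
print(data)   # nan, -inf, 10., 20., inf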
planetarypy/planetaryimage
|
planetaryimage/pds3image.py
|
Pointer.parse
|
def parse(cls, value, record_bytes):
"""Parses the pointer label.
Parameters
----------
pointer_data
Supported values for `pointer_data` are::
^PTR = nnn
^PTR = nnn <BYTES>
^PTR = "filename"
^PTR = ("filename")
^PTR = ("filename", nnn)
^PTR = ("filename", nnn <BYTES>)
record_bytes
Record multiplier value
Returns
-------
Pointer object
"""
if isinstance(value, six.string_types):
return cls(value, 0)
if isinstance(value, list):
if len(value) == 1:
return cls(value[0], 0)
if len(value) == 2:
return cls(value[0], cls._parse_bytes(value[1], record_bytes))
raise ValueError('Unsupported pointer type')
return cls(None, cls._parse_bytes(value, record_bytes))
|
python
|
def parse(cls, value, record_bytes):
"""Parses the pointer label.
Parameters
----------
pointer_data
Supported values for `pointer_data` are::
^PTR = nnn
^PTR = nnn <BYTES>
^PTR = "filename"
^PTR = ("filename")
^PTR = ("filename", nnn)
^PTR = ("filename", nnn <BYTES>)
record_bytes
Record multiplier value
Returns
-------
Pointer object
"""
if isinstance(value, six.string_types):
return cls(value, 0)
if isinstance(value, list):
if len(value) == 1:
return cls(value[0], 0)
if len(value) == 2:
return cls(value[0], cls._parse_bytes(value[1], record_bytes))
raise ValueError('Unsupported pointer type')
return cls(None, cls._parse_bytes(value, record_bytes))
|
[
"def",
"parse",
"(",
"cls",
",",
"value",
",",
"record_bytes",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"return",
"cls",
"(",
"value",
",",
"0",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"len",
"(",
"value",
")",
"==",
"1",
":",
"return",
"cls",
"(",
"value",
"[",
"0",
"]",
",",
"0",
")",
"if",
"len",
"(",
"value",
")",
"==",
"2",
":",
"return",
"cls",
"(",
"value",
"[",
"0",
"]",
",",
"cls",
".",
"_parse_bytes",
"(",
"value",
"[",
"1",
"]",
",",
"record_bytes",
")",
")",
"raise",
"ValueError",
"(",
"'Unsupported pointer type'",
")",
"return",
"cls",
"(",
"None",
",",
"cls",
".",
"_parse_bytes",
"(",
"value",
",",
"record_bytes",
")",
")"
] |
Parses the pointer label.
Parameters
----------
pointer_data
Supported values for `pointer_data` are::
^PTR = nnn
^PTR = nnn <BYTES>
^PTR = "filename"
^PTR = ("filename")
^PTR = ("filename", nnn)
^PTR = ("filename", nnn <BYTES>)
record_bytes
Record multiplier value
Returns
-------
Pointer object
|
[
"Parses",
"the",
"pointer",
"label",
"."
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L24-L58
|
train
|
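The branch structure of `Pointer.parse` restated standalone; `_parse_bytes` is not shown in these rows, so a simplified stand-in (1-based record number times record_bytes) is used here purely for illustration:

RECORD_BYTES = 1024

def parse_pointer(value, record_bytes=RECORD_BYTES):
    # stand-in for cls._parse_bytes, assuming 1-based record multiples
    def parse_bytes(v):
        return (v - 1) * record_bytes

    if isinstance(value, str):                       # ^PTR = "filename"
        return (value, 0)
    if isinstance(value, list):
        if len(value) == 1:                          # ^PTR = ("filename")
            return (value[0], 0)
        if len(value) == 2:                          # ^PTR = ("filename", nnn)
            return (value[0], parse_bytes(value[1]))
        raise ValueError('Unsupported pointer type')
    return (None, parse_bytes(value))                # ^PTR = nnn

print(parse_pointer('EXTRA.IMG'))        # ('EXTRA.IMG', 0)
print(parse_pointer(3))                  # (None, 2048)
print(parse_pointer(['EXTRA.IMG', 3]))   # ('EXTRA.IMG', 2048)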
planetarypy/planetaryimage
|
planetaryimage/pds3image.py
|
PDS3Image._save
|
def _save(self, file_to_write, overwrite):
"""Save PDS3Image object as PDS3 file.
Parameters
----------
filename: Set filename for the pds image to be saved.
Overwrite: Use this keyword to save image with same filename.
Usage: image.save('temp.IMG', overwrite=True)
"""
if overwrite:
file_to_write = self.filename
elif os.path.isfile(file_to_write):
msg = 'File ' + file_to_write + ' already exists !\n' + \
'Call save() with "overwrite = True" to overwrite the file.'
raise IOError(msg)
encoder = pvl.encoder.PDSLabelEncoder
serial_label = pvl.dumps(self.label, cls=encoder)
label_sz = len(serial_label)
image_pointer = int(label_sz / self.label['RECORD_BYTES']) + 1
self.label['^IMAGE'] = image_pointer + 1
if self._sample_bytes != self.label['IMAGE']['SAMPLE_BITS'] * 8:
self.label['IMAGE']['SAMPLE_BITS'] = self.data.itemsize * 8
sample_type_to_save = self.DTYPES[self._sample_type[0] + self.dtype.kind]
self.label['IMAGE']['SAMPLE_TYPE'] = sample_type_to_save
if len(self.data.shape) == 3:
self.label['IMAGE']['BANDS'] = self.data.shape[0]
self.label['IMAGE']['LINES'] = self.data.shape[1]
self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[2]
else:
self.label['IMAGE']['BANDS'] = 1
self.label['IMAGE']['LINES'] = self.data.shape[0]
self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[1]
diff = 0
if len(pvl.dumps(self.label, cls=encoder)) != label_sz:
diff = abs(label_sz - len(pvl.dumps(self.label, cls=encoder)))
pvl.dump(self.label, file_to_write, cls=encoder)
offset = image_pointer * self.label['RECORD_BYTES'] - label_sz
stream = open(file_to_write, 'a')
for i in range(0, offset+diff):
stream.write(" ")
if (self._bands > 1 and self._format != 'BAND_SEQUENTIAL'):
raise NotImplementedError
else:
self.data.tofile(stream, format='%' + self.dtype.kind)
stream.close()
|
python
|
def _save(self, file_to_write, overwrite):
"""Save PDS3Image object as PDS3 file.
Parameters
----------
filename: Set filename for the pds image to be saved.
Overwrite: Use this keyword to save image with same filename.
Usage: image.save('temp.IMG', overwrite=True)
"""
if overwrite:
file_to_write = self.filename
elif os.path.isfile(file_to_write):
msg = 'File ' + file_to_write + ' already exists !\n' + \
'Call save() with "overwrite = True" to overwrite the file.'
raise IOError(msg)
encoder = pvl.encoder.PDSLabelEncoder
serial_label = pvl.dumps(self.label, cls=encoder)
label_sz = len(serial_label)
image_pointer = int(label_sz / self.label['RECORD_BYTES']) + 1
self.label['^IMAGE'] = image_pointer + 1
if self._sample_bytes != self.label['IMAGE']['SAMPLE_BITS'] * 8:
self.label['IMAGE']['SAMPLE_BITS'] = self.data.itemsize * 8
sample_type_to_save = self.DTYPES[self._sample_type[0] + self.dtype.kind]
self.label['IMAGE']['SAMPLE_TYPE'] = sample_type_to_save
if len(self.data.shape) == 3:
self.label['IMAGE']['BANDS'] = self.data.shape[0]
self.label['IMAGE']['LINES'] = self.data.shape[1]
self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[2]
else:
self.label['IMAGE']['BANDS'] = 1
self.label['IMAGE']['LINES'] = self.data.shape[0]
self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[1]
diff = 0
if len(pvl.dumps(self.label, cls=encoder)) != label_sz:
diff = abs(label_sz - len(pvl.dumps(self.label, cls=encoder)))
pvl.dump(self.label, file_to_write, cls=encoder)
offset = image_pointer * self.label['RECORD_BYTES'] - label_sz
stream = open(file_to_write, 'a')
for i in range(0, offset+diff):
stream.write(" ")
if (self._bands > 1 and self._format != 'BAND_SEQUENTIAL'):
raise NotImplementedError
else:
self.data.tofile(stream, format='%' + self.dtype.kind)
stream.close()
|
[
"def",
"_save",
"(",
"self",
",",
"file_to_write",
",",
"overwrite",
")",
":",
"if",
"overwrite",
":",
"file_to_write",
"=",
"self",
".",
"filename",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"file_to_write",
")",
":",
"msg",
"=",
"'File '",
"+",
"file_to_write",
"+",
"' already exists !\\n'",
"+",
"'Call save() with \"overwrite = True\" to overwrite the file.'",
"raise",
"IOError",
"(",
"msg",
")",
"encoder",
"=",
"pvl",
".",
"encoder",
".",
"PDSLabelEncoder",
"serial_label",
"=",
"pvl",
".",
"dumps",
"(",
"self",
".",
"label",
",",
"cls",
"=",
"encoder",
")",
"label_sz",
"=",
"len",
"(",
"serial_label",
")",
"image_pointer",
"=",
"int",
"(",
"label_sz",
"/",
"self",
".",
"label",
"[",
"'RECORD_BYTES'",
"]",
")",
"+",
"1",
"self",
".",
"label",
"[",
"'^IMAGE'",
"]",
"=",
"image_pointer",
"+",
"1",
"if",
"self",
".",
"_sample_bytes",
"!=",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'SAMPLE_BITS'",
"]",
"*",
"8",
":",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'SAMPLE_BITS'",
"]",
"=",
"self",
".",
"data",
".",
"itemsize",
"*",
"8",
"sample_type_to_save",
"=",
"self",
".",
"DTYPES",
"[",
"self",
".",
"_sample_type",
"[",
"0",
"]",
"+",
"self",
".",
"dtype",
".",
"kind",
"]",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'SAMPLE_TYPE'",
"]",
"=",
"sample_type_to_save",
"if",
"len",
"(",
"self",
".",
"data",
".",
"shape",
")",
"==",
"3",
":",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'BANDS'",
"]",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'LINES'",
"]",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'LINE_SAMPLES'",
"]",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"2",
"]",
"else",
":",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'BANDS'",
"]",
"=",
"1",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'LINES'",
"]",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'LINE_SAMPLES'",
"]",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"1",
"]",
"diff",
"=",
"0",
"if",
"len",
"(",
"pvl",
".",
"dumps",
"(",
"self",
".",
"label",
",",
"cls",
"=",
"encoder",
")",
")",
"!=",
"label_sz",
":",
"diff",
"=",
"abs",
"(",
"label_sz",
"-",
"len",
"(",
"pvl",
".",
"dumps",
"(",
"self",
".",
"label",
",",
"cls",
"=",
"encoder",
")",
")",
")",
"pvl",
".",
"dump",
"(",
"self",
".",
"label",
",",
"file_to_write",
",",
"cls",
"=",
"encoder",
")",
"offset",
"=",
"image_pointer",
"*",
"self",
".",
"label",
"[",
"'RECORD_BYTES'",
"]",
"-",
"label_sz",
"stream",
"=",
"open",
"(",
"file_to_write",
",",
"'a'",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"offset",
"+",
"diff",
")",
":",
"stream",
".",
"write",
"(",
"\" \"",
")",
"if",
"(",
"self",
".",
"_bands",
">",
"1",
"and",
"self",
".",
"_format",
"!=",
"'BAND_SEQUENTIAL'",
")",
":",
"raise",
"NotImplementedError",
"else",
":",
"self",
".",
"data",
".",
"tofile",
"(",
"stream",
",",
"format",
"=",
"'%'",
"+",
"self",
".",
"dtype",
".",
"kind",
")",
"stream",
".",
"close",
"(",
")"
] |
Save PDS3Image object as PDS3 file.
Parameters
----------
filename: Set filename for the pds image to be saved.
Overwrite: Use this keyword to save image with same filename.
Usage: image.save('temp.IMG', overwrite=True)
|
[
"Save",
"PDS3Image",
"object",
"as",
"PDS3",
"file",
"."
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L129-L181
|
train
|
planetarypy/planetaryimage
|
planetaryimage/pds3image.py
|
PDS3Image._create_label
|
def _create_label(self, array):
"""Create sample PDS3 label for NumPy Array.
It is called by 'image.py' to create PDS3Image object
from Numpy Array.
Returns
-------
PVLModule label for the given NumPy array.
Usage: self.label = _create_label(array)
"""
if len(array.shape) == 3:
bands = array.shape[0]
lines = array.shape[1]
line_samples = array.shape[2]
else:
bands = 1
lines = array.shape[0]
line_samples = array.shape[1]
record_bytes = line_samples * array.itemsize
label_module = pvl.PVLModule([
('PDS_VERSION_ID', 'PDS3'),
('RECORD_TYPE', 'FIXED_LENGTH'),
('RECORD_BYTES', record_bytes),
('LABEL_RECORDS', 1),
('^IMAGE', 1),
('IMAGE',
{'BANDS': bands,
'LINES': lines,
'LINE_SAMPLES': line_samples,
'MAXIMUM': 0,
'MEAN': 0,
'MEDIAN': 0,
'MINIMUM': 0,
'SAMPLE_BITS': array.itemsize * 8,
'SAMPLE_TYPE': 'MSB_INTEGER',
'STANDARD_DEVIATION': 0})
])
return self._update_label(label_module, array)
|
python
|
def _create_label(self, array):
"""Create sample PDS3 label for NumPy Array.
It is called by 'image.py' to create PDS3Image object
from Numpy Array.
Returns
-------
PVLModule label for the given NumPy array.
Usage: self.label = _create_label(array)
"""
if len(array.shape) == 3:
bands = array.shape[0]
lines = array.shape[1]
line_samples = array.shape[2]
else:
bands = 1
lines = array.shape[0]
line_samples = array.shape[1]
record_bytes = line_samples * array.itemsize
label_module = pvl.PVLModule([
('PDS_VERSION_ID', 'PDS3'),
('RECORD_TYPE', 'FIXED_LENGTH'),
('RECORD_BYTES', record_bytes),
('LABEL_RECORDS', 1),
('^IMAGE', 1),
('IMAGE',
{'BANDS': bands,
'LINES': lines,
'LINE_SAMPLES': line_samples,
'MAXIMUM': 0,
'MEAN': 0,
'MEDIAN': 0,
'MINIMUM': 0,
'SAMPLE_BITS': array.itemsize * 8,
'SAMPLE_TYPE': 'MSB_INTEGER',
'STANDARD_DEVIATION': 0})
])
return self._update_label(label_module, array)
|
[
"def",
"_create_label",
"(",
"self",
",",
"array",
")",
":",
"if",
"len",
"(",
"array",
".",
"shape",
")",
"==",
"3",
":",
"bands",
"=",
"array",
".",
"shape",
"[",
"0",
"]",
"lines",
"=",
"array",
".",
"shape",
"[",
"1",
"]",
"line_samples",
"=",
"array",
".",
"shape",
"[",
"2",
"]",
"else",
":",
"bands",
"=",
"1",
"lines",
"=",
"array",
".",
"shape",
"[",
"0",
"]",
"line_samples",
"=",
"array",
".",
"shape",
"[",
"1",
"]",
"record_bytes",
"=",
"line_samples",
"*",
"array",
".",
"itemsize",
"label_module",
"=",
"pvl",
".",
"PVLModule",
"(",
"[",
"(",
"'PDS_VERSION_ID'",
",",
"'PDS3'",
")",
",",
"(",
"'RECORD_TYPE'",
",",
"'FIXED_LENGTH'",
")",
",",
"(",
"'RECORD_BYTES'",
",",
"record_bytes",
")",
",",
"(",
"'LABEL_RECORDS'",
",",
"1",
")",
",",
"(",
"'^IMAGE'",
",",
"1",
")",
",",
"(",
"'IMAGE'",
",",
"{",
"'BANDS'",
":",
"bands",
",",
"'LINES'",
":",
"lines",
",",
"'LINE_SAMPLES'",
":",
"line_samples",
",",
"'MAXIMUM'",
":",
"0",
",",
"'MEAN'",
":",
"0",
",",
"'MEDIAN'",
":",
"0",
",",
"'MINIMUM'",
":",
"0",
",",
"'SAMPLE_BITS'",
":",
"array",
".",
"itemsize",
"*",
"8",
",",
"'SAMPLE_TYPE'",
":",
"'MSB_INTEGER'",
",",
"'STANDARD_DEVIATION'",
":",
"0",
"}",
")",
"]",
")",
"return",
"self",
".",
"_update_label",
"(",
"label_module",
",",
"array",
")"
] |
Create sample PDS3 label for NumPy Array.
It is called by 'image.py' to create PDS3Image object
from Numpy Array.
Returns
-------
PVLModule label for the given NumPy array.
Usage: self.label = _create_label(array)
|
[
"Create",
"sample",
"PDS3",
"label",
"for",
"NumPy",
"Array",
".",
"It",
"is",
"called",
"by",
"image",
".",
"py",
"to",
"create",
"PDS3Image",
"object",
"from",
"Numpy",
"Array",
"."
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L183-L222
|
train
|
planetarypy/planetaryimage
|
planetaryimage/pds3image.py
|
PDS3Image._update_label
|
def _update_label(self, label, array):
"""Update PDS3 label for NumPy Array.
It is called by '_create_label' to update label values
such as,
- ^IMAGE, RECORD_BYTES
- STANDARD_DEVIATION
- MAXIMUM, MINIMUM
- MEDIAN, MEAN
Returns
-------
Update label module for the NumPy array.
Usage: self.label = self._update_label(label, array)
"""
maximum = float(numpy.max(array))
mean = float(numpy.mean(array))
median = float(numpy.median(array))
minimum = float(numpy.min(array))
stdev = float(numpy.std(array, ddof=1))
encoder = pvl.encoder.PDSLabelEncoder
serial_label = pvl.dumps(label, cls=encoder)
label_sz = len(serial_label)
image_pointer = int(label_sz / label['RECORD_BYTES']) + 1
label['^IMAGE'] = image_pointer + 1
label['LABEL_RECORDS'] = image_pointer
label['IMAGE']['MEAN'] = mean
label['IMAGE']['MAXIMUM'] = maximum
label['IMAGE']['MEDIAN'] = median
label['IMAGE']['MINIMUM'] = minimum
label['IMAGE']['STANDARD_DEVIATION'] = stdev
return label
|
python
|
def _update_label(self, label, array):
"""Update PDS3 label for NumPy Array.
It is called by '_create_label' to update label values
such as,
- ^IMAGE, RECORD_BYTES
- STANDARD_DEVIATION
- MAXIMUM, MINIMUM
- MEDIAN, MEAN
Returns
-------
Update label module for the NumPy array.
Usage: self.label = self._update_label(label, array)
"""
maximum = float(numpy.max(array))
mean = float(numpy.mean(array))
median = float(numpy.median(array))
minimum = float(numpy.min(array))
stdev = float(numpy.std(array, ddof=1))
encoder = pvl.encoder.PDSLabelEncoder
serial_label = pvl.dumps(label, cls=encoder)
label_sz = len(serial_label)
image_pointer = int(label_sz / label['RECORD_BYTES']) + 1
label['^IMAGE'] = image_pointer + 1
label['LABEL_RECORDS'] = image_pointer
label['IMAGE']['MEAN'] = mean
label['IMAGE']['MAXIMUM'] = maximum
label['IMAGE']['MEDIAN'] = median
label['IMAGE']['MINIMUM'] = minimum
label['IMAGE']['STANDARD_DEVIATION'] = stdev
return label
|
[
"def",
"_update_label",
"(",
"self",
",",
"label",
",",
"array",
")",
":",
"maximum",
"=",
"float",
"(",
"numpy",
".",
"max",
"(",
"array",
")",
")",
"mean",
"=",
"float",
"(",
"numpy",
".",
"mean",
"(",
"array",
")",
")",
"median",
"=",
"float",
"(",
"numpy",
".",
"median",
"(",
"array",
")",
")",
"minimum",
"=",
"float",
"(",
"numpy",
".",
"min",
"(",
"array",
")",
")",
"stdev",
"=",
"float",
"(",
"numpy",
".",
"std",
"(",
"array",
",",
"ddof",
"=",
"1",
")",
")",
"encoder",
"=",
"pvl",
".",
"encoder",
".",
"PDSLabelEncoder",
"serial_label",
"=",
"pvl",
".",
"dumps",
"(",
"label",
",",
"cls",
"=",
"encoder",
")",
"label_sz",
"=",
"len",
"(",
"serial_label",
")",
"image_pointer",
"=",
"int",
"(",
"label_sz",
"/",
"label",
"[",
"'RECORD_BYTES'",
"]",
")",
"+",
"1",
"label",
"[",
"'^IMAGE'",
"]",
"=",
"image_pointer",
"+",
"1",
"label",
"[",
"'LABEL_RECORDS'",
"]",
"=",
"image_pointer",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'MEAN'",
"]",
"=",
"mean",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'MAXIMUM'",
"]",
"=",
"maximum",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'MEDIAN'",
"]",
"=",
"median",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'MINIMUM'",
"]",
"=",
"minimum",
"label",
"[",
"'IMAGE'",
"]",
"[",
"'STANDARD_DEVIATION'",
"]",
"=",
"stdev",
"return",
"label"
] |
Update PDS3 label for NumPy Array.
It is called by '_create_label' to update label values
such as,
- ^IMAGE, RECORD_BYTES
- STANDARD_DEVIATION
- MAXIMUM, MINIMUM
- MEDIAN, MEAN
Returns
-------
Update label module for the NumPy array.
Usage: self.label = self._update_label(label, array)
|
[
"Update",
"PDS3",
"label",
"for",
"NumPy",
"Array",
".",
"It",
"is",
"called",
"by",
"_create_label",
"to",
"update",
"label",
"values",
"such",
"as",
"-",
"^IMAGE",
"RECORD_BYTES",
"-",
"STANDARD_DEVIATION",
"-",
"MAXIMUM",
"MINIMUM",
"-",
"MEDIAN",
"MEAN"
] |
ee9aef4746ff7a003b1457565acb13f5f1db0375
|
https://github.com/planetarypy/planetaryimage/blob/ee9aef4746ff7a003b1457565acb13f5f1db0375/planetaryimage/pds3image.py#L224-L259
|
train
|
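The ^IMAGE / LABEL_RECORDS bookkeeping in _update_label is plain integer arithmetic: divide the serialized label size by RECORD_BYTES, add one, and point ^IMAGE at the record after the label. A dependency-free sketch with made-up sizes:

    # Illustrative sizes only; _update_label measures label_sz with pvl.dumps().
    label_sz = 700
    record_bytes = 128

    label_records = label_sz // record_bytes + 1   # records occupied by the label
    image_pointer = label_records + 1              # image data starts on the next record
    print(label_records, image_pointer)            # 6 7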
web-push-libs/encrypted-content-encoding
|
python/http_ece/__init__.py
|
iv
|
def iv(base, counter):
"""Generate an initialization vector.
"""
if (counter >> 64) != 0:
raise ECEException(u"Counter too big")
(mask,) = struct.unpack("!Q", base[4:])
return base[:4] + struct.pack("!Q", counter ^ mask)
|
python
|
def iv(base, counter):
"""Generate an initialization vector.
"""
if (counter >> 64) != 0:
raise ECEException(u"Counter too big")
(mask,) = struct.unpack("!Q", base[4:])
return base[:4] + struct.pack("!Q", counter ^ mask)
|
[
"def",
"iv",
"(",
"base",
",",
"counter",
")",
":",
"if",
"(",
"counter",
">>",
"64",
")",
"!=",
"0",
":",
"raise",
"ECEException",
"(",
"u\"Counter too big\"",
")",
"(",
"mask",
",",
")",
"=",
"struct",
".",
"unpack",
"(",
"\"!Q\"",
",",
"base",
"[",
"4",
":",
"]",
")",
"return",
"base",
"[",
":",
"4",
"]",
"+",
"struct",
".",
"pack",
"(",
"\"!Q\"",
",",
"counter",
"^",
"mask",
")"
] |
Generate an initialization vector.
|
[
"Generate",
"an",
"initialization",
"vector",
"."
] |
849aebea751752e17fc84a64ce1bbf65dc994e6c
|
https://github.com/web-push-libs/encrypted-content-encoding/blob/849aebea751752e17fc84a64ce1bbf65dc994e6c/python/http_ece/__init__.py#L164-L171
|
train
|
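A standalone restatement of the nonce construction in iv() above: the last eight bytes of the 12-byte base are read as a big-endian integer and XORed with the record counter, so consecutive records never reuse an IV. The sample base value is made up:

    import struct

    def derive_iv(base, counter):
        # base is a 12-byte nonce; the counter must fit in 64 bits.
        if counter >> 64:
            raise ValueError("Counter too big")
        (mask,) = struct.unpack("!Q", base[4:])
        return base[:4] + struct.pack("!Q", counter ^ mask)

    base = bytes(range(12))          # illustrative nonce, not a derived key
    print(derive_iv(base, 0).hex())
    print(derive_iv(base, 1).hex())  # differs from counter 0 only in the last byte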
web-push-libs/encrypted-content-encoding
|
python/http_ece/__init__.py
|
encrypt
|
def encrypt(content, salt=None, key=None,
private_key=None, dh=None, auth_secret=None,
keyid=None, keylabel="P-256",
rs=4096, version="aes128gcm"):
"""
Encrypt a data block
:param content: block of data to encrypt
:type content: str
:param salt: Encryption salt
:type salt: str
:param key: Encryption key data
:type key: str
:param private_key: DH private key
:type key: object
:param keyid: Internal key identifier for private key info
:type keyid: str
:param dh: Remote Diffie Hellman sequence
:type dh: str
:param rs: Record size
:type rs: int
:param auth_secret: Authorization secret
:type auth_secret: str
:param version: ECE Method version
:type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')
:return: Encrypted message content
    :rtype: str
"""
def encrypt_record(key, nonce, counter, buf, last):
encryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv(nonce, counter)),
backend=default_backend()
).encryptor()
if version == 'aes128gcm':
data = encryptor.update(buf + (b'\x02' if last else b'\x01'))
else:
data = encryptor.update((b"\x00" * versions[version]['pad']) + buf)
data += encryptor.finalize()
data += encryptor.tag
return data
def compose_aes128gcm(salt, content, rs, keyid):
"""Compose the header and content of an aes128gcm encrypted
message body
:param salt: The sender's salt value
:type salt: str
:param content: The encrypted body of the message
:type content: str
:param rs: Override for the content length
:type rs: int
:param keyid: The keyid to use for this message
:type keyid: str
"""
if len(keyid) > 255:
raise ECEException("keyid is too long")
header = salt
if rs > MAX_RECORD_SIZE:
raise ECEException("Too much content")
header += struct.pack("!L", rs)
header += struct.pack("!B", len(keyid))
header += keyid
return header + content
if version not in versions:
raise ECEException(u"Invalid version")
if salt is None:
salt = os.urandom(16)
(key_, nonce_) = derive_key("encrypt", version=version,
salt=salt, key=key,
private_key=private_key, dh=dh,
auth_secret=auth_secret,
keyid=keyid, keylabel=keylabel)
overhead = versions[version]['pad']
if version == 'aes128gcm':
overhead += 16
end = len(content)
else:
end = len(content) + 1
if rs <= overhead:
raise ECEException(u"Record size too small")
chunk_size = rs - overhead
result = b""
counter = 0
# the extra one on the loop ensures that we produce a padding only
# record if the data length is an exact multiple of the chunk size
for i in list(range(0, end, chunk_size)):
result += encrypt_record(key_, nonce_, counter,
content[i:i + chunk_size],
(i + chunk_size) >= end)
counter += 1
if version == "aes128gcm":
if keyid is None and private_key is not None:
kid = private_key.public_key().public_bytes(
Encoding.X962,
PublicFormat.UncompressedPoint)
else:
kid = (keyid or '').encode('utf-8')
return compose_aes128gcm(salt, result, rs, keyid=kid)
return result
|
python
|
def encrypt(content, salt=None, key=None,
private_key=None, dh=None, auth_secret=None,
keyid=None, keylabel="P-256",
rs=4096, version="aes128gcm"):
"""
Encrypt a data block
:param content: block of data to encrypt
:type content: str
:param salt: Encryption salt
:type salt: str
:param key: Encryption key data
:type key: str
:param private_key: DH private key
:type key: object
:param keyid: Internal key identifier for private key info
:type keyid: str
:param dh: Remote Diffie Hellman sequence
:type dh: str
:param rs: Record size
:type rs: int
:param auth_secret: Authorization secret
:type auth_secret: str
:param version: ECE Method version
:type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')
:return: Encrypted message content
    :rtype: str
"""
def encrypt_record(key, nonce, counter, buf, last):
encryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv(nonce, counter)),
backend=default_backend()
).encryptor()
if version == 'aes128gcm':
data = encryptor.update(buf + (b'\x02' if last else b'\x01'))
else:
data = encryptor.update((b"\x00" * versions[version]['pad']) + buf)
data += encryptor.finalize()
data += encryptor.tag
return data
def compose_aes128gcm(salt, content, rs, keyid):
"""Compose the header and content of an aes128gcm encrypted
message body
:param salt: The sender's salt value
:type salt: str
:param content: The encrypted body of the message
:type content: str
:param rs: Override for the content length
:type rs: int
:param keyid: The keyid to use for this message
:type keyid: str
"""
if len(keyid) > 255:
raise ECEException("keyid is too long")
header = salt
if rs > MAX_RECORD_SIZE:
raise ECEException("Too much content")
header += struct.pack("!L", rs)
header += struct.pack("!B", len(keyid))
header += keyid
return header + content
if version not in versions:
raise ECEException(u"Invalid version")
if salt is None:
salt = os.urandom(16)
(key_, nonce_) = derive_key("encrypt", version=version,
salt=salt, key=key,
private_key=private_key, dh=dh,
auth_secret=auth_secret,
keyid=keyid, keylabel=keylabel)
overhead = versions[version]['pad']
if version == 'aes128gcm':
overhead += 16
end = len(content)
else:
end = len(content) + 1
if rs <= overhead:
raise ECEException(u"Record size too small")
chunk_size = rs - overhead
result = b""
counter = 0
# the extra one on the loop ensures that we produce a padding only
# record if the data length is an exact multiple of the chunk size
for i in list(range(0, end, chunk_size)):
result += encrypt_record(key_, nonce_, counter,
content[i:i + chunk_size],
(i + chunk_size) >= end)
counter += 1
if version == "aes128gcm":
if keyid is None and private_key is not None:
kid = private_key.public_key().public_bytes(
Encoding.X962,
PublicFormat.UncompressedPoint)
else:
kid = (keyid or '').encode('utf-8')
return compose_aes128gcm(salt, result, rs, keyid=kid)
return result
|
[
"def",
"encrypt",
"(",
"content",
",",
"salt",
"=",
"None",
",",
"key",
"=",
"None",
",",
"private_key",
"=",
"None",
",",
"dh",
"=",
"None",
",",
"auth_secret",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"keylabel",
"=",
"\"P-256\"",
",",
"rs",
"=",
"4096",
",",
"version",
"=",
"\"aes128gcm\"",
")",
":",
"def",
"encrypt_record",
"(",
"key",
",",
"nonce",
",",
"counter",
",",
"buf",
",",
"last",
")",
":",
"encryptor",
"=",
"Cipher",
"(",
"algorithms",
".",
"AES",
"(",
"key",
")",
",",
"modes",
".",
"GCM",
"(",
"iv",
"(",
"nonce",
",",
"counter",
")",
")",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
".",
"encryptor",
"(",
")",
"if",
"version",
"==",
"'aes128gcm'",
":",
"data",
"=",
"encryptor",
".",
"update",
"(",
"buf",
"+",
"(",
"b'\\x02'",
"if",
"last",
"else",
"b'\\x01'",
")",
")",
"else",
":",
"data",
"=",
"encryptor",
".",
"update",
"(",
"(",
"b\"\\x00\"",
"*",
"versions",
"[",
"version",
"]",
"[",
"'pad'",
"]",
")",
"+",
"buf",
")",
"data",
"+=",
"encryptor",
".",
"finalize",
"(",
")",
"data",
"+=",
"encryptor",
".",
"tag",
"return",
"data",
"def",
"compose_aes128gcm",
"(",
"salt",
",",
"content",
",",
"rs",
",",
"keyid",
")",
":",
"\"\"\"Compose the header and content of an aes128gcm encrypted\n message body\n\n :param salt: The sender's salt value\n :type salt: str\n :param content: The encrypted body of the message\n :type content: str\n :param rs: Override for the content length\n :type rs: int\n :param keyid: The keyid to use for this message\n :type keyid: str\n\n \"\"\"",
"if",
"len",
"(",
"keyid",
")",
">",
"255",
":",
"raise",
"ECEException",
"(",
"\"keyid is too long\"",
")",
"header",
"=",
"salt",
"if",
"rs",
">",
"MAX_RECORD_SIZE",
":",
"raise",
"ECEException",
"(",
"\"Too much content\"",
")",
"header",
"+=",
"struct",
".",
"pack",
"(",
"\"!L\"",
",",
"rs",
")",
"header",
"+=",
"struct",
".",
"pack",
"(",
"\"!B\"",
",",
"len",
"(",
"keyid",
")",
")",
"header",
"+=",
"keyid",
"return",
"header",
"+",
"content",
"if",
"version",
"not",
"in",
"versions",
":",
"raise",
"ECEException",
"(",
"u\"Invalid version\"",
")",
"if",
"salt",
"is",
"None",
":",
"salt",
"=",
"os",
".",
"urandom",
"(",
"16",
")",
"(",
"key_",
",",
"nonce_",
")",
"=",
"derive_key",
"(",
"\"encrypt\"",
",",
"version",
"=",
"version",
",",
"salt",
"=",
"salt",
",",
"key",
"=",
"key",
",",
"private_key",
"=",
"private_key",
",",
"dh",
"=",
"dh",
",",
"auth_secret",
"=",
"auth_secret",
",",
"keyid",
"=",
"keyid",
",",
"keylabel",
"=",
"keylabel",
")",
"overhead",
"=",
"versions",
"[",
"version",
"]",
"[",
"'pad'",
"]",
"if",
"version",
"==",
"'aes128gcm'",
":",
"overhead",
"+=",
"16",
"end",
"=",
"len",
"(",
"content",
")",
"else",
":",
"end",
"=",
"len",
"(",
"content",
")",
"+",
"1",
"if",
"rs",
"<=",
"overhead",
":",
"raise",
"ECEException",
"(",
"u\"Record size too small\"",
")",
"chunk_size",
"=",
"rs",
"-",
"overhead",
"result",
"=",
"b\"\"",
"counter",
"=",
"0",
"# the extra one on the loop ensures that we produce a padding only",
"# record if the data length is an exact multiple of the chunk size",
"for",
"i",
"in",
"list",
"(",
"range",
"(",
"0",
",",
"end",
",",
"chunk_size",
")",
")",
":",
"result",
"+=",
"encrypt_record",
"(",
"key_",
",",
"nonce_",
",",
"counter",
",",
"content",
"[",
"i",
":",
"i",
"+",
"chunk_size",
"]",
",",
"(",
"i",
"+",
"chunk_size",
")",
">=",
"end",
")",
"counter",
"+=",
"1",
"if",
"version",
"==",
"\"aes128gcm\"",
":",
"if",
"keyid",
"is",
"None",
"and",
"private_key",
"is",
"not",
"None",
":",
"kid",
"=",
"private_key",
".",
"public_key",
"(",
")",
".",
"public_bytes",
"(",
"Encoding",
".",
"X962",
",",
"PublicFormat",
".",
"UncompressedPoint",
")",
"else",
":",
"kid",
"=",
"(",
"keyid",
"or",
"''",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"compose_aes128gcm",
"(",
"salt",
",",
"result",
",",
"rs",
",",
"keyid",
"=",
"kid",
")",
"return",
"result"
] |
Encrypt a data block
:param content: block of data to encrypt
:type content: str
:param salt: Encryption salt
:type salt: str
:param key: Encryption key data
:type key: str
:param private_key: DH private key
:type key: object
:param keyid: Internal key identifier for private key info
:type keyid: str
:param dh: Remote Diffie Hellman sequence
:type dh: str
:param rs: Record size
:type rs: int
:param auth_secret: Authorization secret
:type auth_secret: str
:param version: ECE Method version
:type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')
:return: Encrypted message content
    :rtype: str
|
[
"Encrypt",
"a",
"data",
"block"
] |
849aebea751752e17fc84a64ce1bbf65dc994e6c
|
https://github.com/web-push-libs/encrypted-content-encoding/blob/849aebea751752e17fc84a64ce1bbf65dc994e6c/python/http_ece/__init__.py#L297-L405
|
train
|
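The record loop in encrypt() only needs a little arithmetic: each aes128gcm record carries rs minus the per-record overhead (padding delimiter plus 16-byte GCM tag) bytes of plaintext. A dependency-free sketch of how many records a payload takes; pad=1 is an assumption about the library's versions table, not a value read from it:

    import math

    def record_count(content_len, rs=4096, pad=1, tag_len=16):
        # pad=1 assumes one delimiter octet per aes128gcm record; the library
        # stores the real per-version value in its `versions` table.
        chunk = rs - pad - tag_len
        if chunk <= 0:
            raise ValueError("Record size too small")
        return math.ceil(content_len / chunk)

    print(record_count(10_000))   # 3 records at the default rs=4096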
varlink/python
|
varlink/error.py
|
VarlinkError.parameters
|
def parameters(self, namespaced=False):
"""returns the exception varlink error parameters"""
if namespaced:
return json.loads(json.dumps(self.args[0]['parameters']), object_hook=lambda d: SimpleNamespace(**d))
else:
return self.args[0].get('parameters')
|
python
|
def parameters(self, namespaced=False):
"""returns the exception varlink error parameters"""
if namespaced:
return json.loads(json.dumps(self.args[0]['parameters']), object_hook=lambda d: SimpleNamespace(**d))
else:
return self.args[0].get('parameters')
|
[
"def",
"parameters",
"(",
"self",
",",
"namespaced",
"=",
"False",
")",
":",
"if",
"namespaced",
":",
"return",
"json",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"args",
"[",
"0",
"]",
"[",
"'parameters'",
"]",
")",
",",
"object_hook",
"=",
"lambda",
"d",
":",
"SimpleNamespace",
"(",
"*",
"*",
"d",
")",
")",
"else",
":",
"return",
"self",
".",
"args",
"[",
"0",
"]",
".",
"get",
"(",
"'parameters'",
")"
] |
returns the exception varlink error parameters
|
[
"returns",
"the",
"exception",
"varlink",
"error",
"parameters"
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/error.py#L66-L71
|
train
|
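The namespaced=True branch of parameters() is a generic JSON round-trip trick that turns nested dicts into attribute access; it needs nothing from varlink and works on any JSON-serializable dict (the values below are made up):

    import json
    from types import SimpleNamespace

    params = {"interface": "org.example.ping", "detail": {"code": 42}}
    ns = json.loads(json.dumps(params),
                    object_hook=lambda d: SimpleNamespace(**d))
    print(ns.interface)    # org.example.ping
    print(ns.detail.code)  # 42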
varlink/python
|
varlink/server.py
|
Service.handle
|
def handle(self, message, _server=None, _request=None):
"""This generator function handles any incoming message.
Write any returned bytes to the output stream.
>>> for outgoing_message in service.handle(incoming_message):
>>> connection.write(outgoing_message)
"""
if not message:
return
if message[-1] == 0:
message = message[:-1]
string = message.decode('utf-8')
handle = self._handle(json.loads(string), message, _server, _request)
for out in handle:
if out == None:
return
try:
yield json.dumps(out, cls=VarlinkEncoder).encode('utf-8')
except ConnectionError as e:
try:
handle.throw(e)
except StopIteration:
pass
|
python
|
def handle(self, message, _server=None, _request=None):
"""This generator function handles any incoming message.
Write any returned bytes to the output stream.
>>> for outgoing_message in service.handle(incoming_message):
>>> connection.write(outgoing_message)
"""
if not message:
return
if message[-1] == 0:
message = message[:-1]
string = message.decode('utf-8')
handle = self._handle(json.loads(string), message, _server, _request)
for out in handle:
if out == None:
return
try:
yield json.dumps(out, cls=VarlinkEncoder).encode('utf-8')
except ConnectionError as e:
try:
handle.throw(e)
except StopIteration:
pass
|
[
"def",
"handle",
"(",
"self",
",",
"message",
",",
"_server",
"=",
"None",
",",
"_request",
"=",
"None",
")",
":",
"if",
"not",
"message",
":",
"return",
"if",
"message",
"[",
"-",
"1",
"]",
"==",
"0",
":",
"message",
"=",
"message",
"[",
":",
"-",
"1",
"]",
"string",
"=",
"message",
".",
"decode",
"(",
"'utf-8'",
")",
"handle",
"=",
"self",
".",
"_handle",
"(",
"json",
".",
"loads",
"(",
"string",
")",
",",
"message",
",",
"_server",
",",
"_request",
")",
"for",
"out",
"in",
"handle",
":",
"if",
"out",
"==",
"None",
":",
"return",
"try",
":",
"yield",
"json",
".",
"dumps",
"(",
"out",
",",
"cls",
"=",
"VarlinkEncoder",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"ConnectionError",
"as",
"e",
":",
"try",
":",
"handle",
".",
"throw",
"(",
"e",
")",
"except",
"StopIteration",
":",
"pass"
] |
This generator function handles any incoming message.
Write any returned bytes to the output stream.
>>> for outgoing_message in service.handle(incoming_message):
>>> connection.write(outgoing_message)
|
[
"This",
"generator",
"function",
"handles",
"any",
"incoming",
"message",
"."
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/server.py#L227-L252
|
train
|
varlink/python
|
varlink/server.py
|
Server.server_close
|
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
if self.remove_file:
try:
os.remove(self.remove_file)
except:
pass
self.socket.close()
|
python
|
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
if self.remove_file:
try:
os.remove(self.remove_file)
except:
pass
self.socket.close()
|
[
"def",
"server_close",
"(",
"self",
")",
":",
"if",
"self",
".",
"remove_file",
":",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"remove_file",
")",
"except",
":",
"pass",
"self",
".",
"socket",
".",
"close",
"(",
")"
] |
Called to clean-up the server.
May be overridden.
|
[
"Called",
"to",
"clean",
"-",
"up",
"the",
"server",
"."
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/server.py#L473-L484
|
train
|
varlink/python
|
varlink/client.py
|
Client.open
|
def open(self, interface_name, namespaced=False, connection=None):
"""Open a new connection and get a client interface handle with the varlink methods installed.
:param interface_name: an interface name, which the service this client object is
connected to, provides.
:param namespaced: If arguments and return values are instances of SimpleNamespace
rather than dictionaries.
:param connection: If set, get the interface handle for an already opened connection.
:exception InterfaceNotFound: if the interface is not found
"""
if not connection:
connection = self.open_connection()
if interface_name not in self._interfaces:
self.get_interface(interface_name, socket_connection=connection)
if interface_name not in self._interfaces:
raise InterfaceNotFound(interface_name)
return self.handler(self._interfaces[interface_name], connection, namespaced=namespaced)
|
python
|
def open(self, interface_name, namespaced=False, connection=None):
"""Open a new connection and get a client interface handle with the varlink methods installed.
:param interface_name: an interface name, which the service this client object is
connected to, provides.
:param namespaced: If arguments and return values are instances of SimpleNamespace
rather than dictionaries.
:param connection: If set, get the interface handle for an already opened connection.
:exception InterfaceNotFound: if the interface is not found
"""
if not connection:
connection = self.open_connection()
if interface_name not in self._interfaces:
self.get_interface(interface_name, socket_connection=connection)
if interface_name not in self._interfaces:
raise InterfaceNotFound(interface_name)
return self.handler(self._interfaces[interface_name], connection, namespaced=namespaced)
|
[
"def",
"open",
"(",
"self",
",",
"interface_name",
",",
"namespaced",
"=",
"False",
",",
"connection",
"=",
"None",
")",
":",
"if",
"not",
"connection",
":",
"connection",
"=",
"self",
".",
"open_connection",
"(",
")",
"if",
"interface_name",
"not",
"in",
"self",
".",
"_interfaces",
":",
"self",
".",
"get_interface",
"(",
"interface_name",
",",
"socket_connection",
"=",
"connection",
")",
"if",
"interface_name",
"not",
"in",
"self",
".",
"_interfaces",
":",
"raise",
"InterfaceNotFound",
"(",
"interface_name",
")",
"return",
"self",
".",
"handler",
"(",
"self",
".",
"_interfaces",
"[",
"interface_name",
"]",
",",
"connection",
",",
"namespaced",
"=",
"namespaced",
")"
] |
Open a new connection and get a client interface handle with the varlink methods installed.
:param interface_name: an interface name, which the service this client object is
connected to, provides.
:param namespaced: If arguments and return values are instances of SimpleNamespace
rather than dictionaries.
:param connection: If set, get the interface handle for an already opened connection.
:exception InterfaceNotFound: if the interface is not found
|
[
"Open",
"a",
"new",
"connection",
"and",
"get",
"a",
"client",
"interface",
"handle",
"with",
"the",
"varlink",
"methods",
"installed",
"."
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/client.py#L585-L606
|
train
|
varlink/python
|
varlink/client.py
|
Client.get_interfaces
|
def get_interfaces(self, socket_connection=None):
"""Returns the a list of Interface objects the service implements."""
if not socket_connection:
socket_connection = self.open_connection()
close_socket = True
else:
close_socket = False
# noinspection PyUnresolvedReferences
_service = self.handler(self._interfaces["org.varlink.service"], socket_connection)
self.info = _service.GetInfo()
if close_socket:
socket_connection.close()
return self.info['interfaces']
|
python
|
def get_interfaces(self, socket_connection=None):
"""Returns the a list of Interface objects the service implements."""
if not socket_connection:
socket_connection = self.open_connection()
close_socket = True
else:
close_socket = False
# noinspection PyUnresolvedReferences
_service = self.handler(self._interfaces["org.varlink.service"], socket_connection)
self.info = _service.GetInfo()
if close_socket:
socket_connection.close()
return self.info['interfaces']
|
[
"def",
"get_interfaces",
"(",
"self",
",",
"socket_connection",
"=",
"None",
")",
":",
"if",
"not",
"socket_connection",
":",
"socket_connection",
"=",
"self",
".",
"open_connection",
"(",
")",
"close_socket",
"=",
"True",
"else",
":",
"close_socket",
"=",
"False",
"# noinspection PyUnresolvedReferences",
"_service",
"=",
"self",
".",
"handler",
"(",
"self",
".",
"_interfaces",
"[",
"\"org.varlink.service\"",
"]",
",",
"socket_connection",
")",
"self",
".",
"info",
"=",
"_service",
".",
"GetInfo",
"(",
")",
"if",
"close_socket",
":",
"socket_connection",
".",
"close",
"(",
")",
"return",
"self",
".",
"info",
"[",
"'interfaces'",
"]"
] |
Returns a list of Interface objects the service implements.
|
[
"Returns",
"the",
"a",
"list",
"of",
"Interface",
"objects",
"the",
"service",
"implements",
"."
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/client.py#L615-L630
|
train
|
varlink/python
|
varlink/client.py
|
Client.add_interface
|
def add_interface(self, interface):
"""Manually add or overwrite an interface definition from an Interface object.
:param interface: an Interface() object
"""
if not isinstance(interface, Interface):
raise TypeError
self._interfaces[interface.name] = interface
|
python
|
def add_interface(self, interface):
"""Manually add or overwrite an interface definition from an Interface object.
:param interface: an Interface() object
"""
if not isinstance(interface, Interface):
raise TypeError
self._interfaces[interface.name] = interface
|
[
"def",
"add_interface",
"(",
"self",
",",
"interface",
")",
":",
"if",
"not",
"isinstance",
"(",
"interface",
",",
"Interface",
")",
":",
"raise",
"TypeError",
"self",
".",
"_interfaces",
"[",
"interface",
".",
"name",
"]",
"=",
"interface"
] |
Manually add or overwrite an interface definition from an Interface object.
:param interface: an Interface() object
|
[
"Manually",
"add",
"or",
"overwrite",
"an",
"interface",
"definition",
"from",
"an",
"Interface",
"object",
"."
] |
b021a29dd9def06b03416d20e8b37be39c3edd33
|
https://github.com/varlink/python/blob/b021a29dd9def06b03416d20e8b37be39c3edd33/varlink/client.py#L650-L659
|
train
|
SINGROUP/SOAPLite
|
utilities/batchSoapPy.py
|
create
|
def create(atoms_list,N, L, cutoff = 0, all_atomtypes=[]):
"""Takes a trajectory xyz file and writes soap features
"""
myAlphas, myBetas = genBasis.getBasisFunc(cutoff, N)
# get information about feature length
n_datapoints = len(atoms_list)
atoms = atoms_list[0]
x = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas,N,L, all_atomtypes=all_atomtypes)
n_features = x.shape[1]
print("soap first", x.shape)
print(n_datapoints, n_features)
soapmatrix = np.zeros((n_datapoints, n_features))
i = -1
for atoms in atoms_list:
i +=1
#atoms
print("Processing " + str(atoms.info)," Run time: " + str(time.time()-t0_total), end="\r")
soapmatrix[i,:] = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas, N, L, all_atomtypes=all_atomtypes)
print("")
# infos
print("shape", soapmatrix.shape)
return soapmatrix
|
python
|
def create(atoms_list,N, L, cutoff = 0, all_atomtypes=[]):
"""Takes a trajectory xyz file and writes soap features
"""
myAlphas, myBetas = genBasis.getBasisFunc(cutoff, N)
# get information about feature length
n_datapoints = len(atoms_list)
atoms = atoms_list[0]
x = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas,N,L, all_atomtypes=all_atomtypes)
n_features = x.shape[1]
print("soap first", x.shape)
print(n_datapoints, n_features)
soapmatrix = np.zeros((n_datapoints, n_features))
i = -1
for atoms in atoms_list:
i +=1
#atoms
print("Processing " + str(atoms.info)," Run time: " + str(time.time()-t0_total), end="\r")
soapmatrix[i,:] = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas, N, L, all_atomtypes=all_atomtypes)
print("")
# infos
print("shape", soapmatrix.shape)
return soapmatrix
|
[
"def",
"create",
"(",
"atoms_list",
",",
"N",
",",
"L",
",",
"cutoff",
"=",
"0",
",",
"all_atomtypes",
"=",
"[",
"]",
")",
":",
"myAlphas",
",",
"myBetas",
"=",
"genBasis",
".",
"getBasisFunc",
"(",
"cutoff",
",",
"N",
")",
"# get information about feature length",
"n_datapoints",
"=",
"len",
"(",
"atoms_list",
")",
"atoms",
"=",
"atoms_list",
"[",
"0",
"]",
"x",
"=",
"get_lastatom_soap",
"(",
"atoms",
",",
"cutoff",
",",
"myAlphas",
",",
"myBetas",
",",
"N",
",",
"L",
",",
"all_atomtypes",
"=",
"all_atomtypes",
")",
"n_features",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"print",
"(",
"\"soap first\"",
",",
"x",
".",
"shape",
")",
"print",
"(",
"n_datapoints",
",",
"n_features",
")",
"soapmatrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_datapoints",
",",
"n_features",
")",
")",
"i",
"=",
"-",
"1",
"for",
"atoms",
"in",
"atoms_list",
":",
"i",
"+=",
"1",
"#atoms",
"print",
"(",
"\"Processing \"",
"+",
"str",
"(",
"atoms",
".",
"info",
")",
",",
"\" Run time: \"",
"+",
"str",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t0_total",
")",
",",
"end",
"=",
"\"\\r\"",
")",
"soapmatrix",
"[",
"i",
",",
":",
"]",
"=",
"get_lastatom_soap",
"(",
"atoms",
",",
"cutoff",
",",
"myAlphas",
",",
"myBetas",
",",
"N",
",",
"L",
",",
"all_atomtypes",
"=",
"all_atomtypes",
")",
"print",
"(",
"\"\"",
")",
"# infos",
"print",
"(",
"\"shape\"",
",",
"soapmatrix",
".",
"shape",
")",
"return",
"soapmatrix"
] |
Takes a trajectory xyz file and writes soap features
|
[
"Takes",
"a",
"trajectory",
"xyz",
"file",
"and",
"writes",
"soap",
"features"
] |
80e27cc8d5b4c887011542c5a799583bfc6ff643
|
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/utilities/batchSoapPy.py#L21-L44
|
train
|
SINGROUP/SOAPLite
|
soaplite/getBasis.py
|
getPoly
|
def getPoly(rCut, nMax):
"""Used to calculate discrete vectors for the polynomial basis functions.
Args:
rCut(float): Radial cutoff
nMax(int): Number of polynomial radial functions
"""
rCutVeryHard = rCut+5.0
rx = 0.5*rCutVeryHard*(x + 1)
basisFunctions = []
for i in range(1, nMax + 1):
basisFunctions.append(lambda rr, i=i, rCut=rCut: (rCut - np.clip(rr, 0, rCut))**(i+2))
# Calculate the overlap of the different polynomial functions in a
# matrix S. These overlaps defined through the dot product over the
# radial coordinate are analytically calculable: Integrate[(rc - r)^(a
# + 2) (rc - r)^(b + 2) r^2, {r, 0, rc}]. Then the weights B that make
# the basis orthonormal are given by B=S^{-1/2}
S = np.zeros((nMax, nMax))
for i in range(1, nMax+1):
for j in range(1, nMax+1):
S[i-1, j-1] = (2*(rCut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))
betas = sqrtm(np.linalg.inv(S))
# If the result is complex, the calculation is currently halted.
if (betas.dtype == np.complex128):
raise ValueError(
"Could not calculate normalization factors for the polynomial basis"
" in the domain of real numbers. Lowering the number of radial "
"basis functions is advised."
)
fs = np.zeros([nMax, len(x)])
for n in range(1, nMax+1):
fs[n-1, :] = (rCut-np.clip(rx, 0, rCut))**(n+2)
gss = np.dot(betas, fs)
return nMax, rx, gss
|
python
|
def getPoly(rCut, nMax):
"""Used to calculate discrete vectors for the polynomial basis functions.
Args:
rCut(float): Radial cutoff
nMax(int): Number of polynomial radial functions
"""
rCutVeryHard = rCut+5.0
rx = 0.5*rCutVeryHard*(x + 1)
basisFunctions = []
for i in range(1, nMax + 1):
basisFunctions.append(lambda rr, i=i, rCut=rCut: (rCut - np.clip(rr, 0, rCut))**(i+2))
# Calculate the overlap of the different polynomial functions in a
# matrix S. These overlaps defined through the dot product over the
# radial coordinate are analytically calculable: Integrate[(rc - r)^(a
# + 2) (rc - r)^(b + 2) r^2, {r, 0, rc}]. Then the weights B that make
# the basis orthonormal are given by B=S^{-1/2}
S = np.zeros((nMax, nMax))
for i in range(1, nMax+1):
for j in range(1, nMax+1):
S[i-1, j-1] = (2*(rCut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))
betas = sqrtm(np.linalg.inv(S))
# If the result is complex, the calculation is currently halted.
if (betas.dtype == np.complex128):
raise ValueError(
"Could not calculate normalization factors for the polynomial basis"
" in the domain of real numbers. Lowering the number of radial "
"basis functions is advised."
)
fs = np.zeros([nMax, len(x)])
for n in range(1, nMax+1):
fs[n-1, :] = (rCut-np.clip(rx, 0, rCut))**(n+2)
gss = np.dot(betas, fs)
return nMax, rx, gss
|
[
"def",
"getPoly",
"(",
"rCut",
",",
"nMax",
")",
":",
"rCutVeryHard",
"=",
"rCut",
"+",
"5.0",
"rx",
"=",
"0.5",
"*",
"rCutVeryHard",
"*",
"(",
"x",
"+",
"1",
")",
"basisFunctions",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"nMax",
"+",
"1",
")",
":",
"basisFunctions",
".",
"append",
"(",
"lambda",
"rr",
",",
"i",
"=",
"i",
",",
"rCut",
"=",
"rCut",
":",
"(",
"rCut",
"-",
"np",
".",
"clip",
"(",
"rr",
",",
"0",
",",
"rCut",
")",
")",
"**",
"(",
"i",
"+",
"2",
")",
")",
"# Calculate the overlap of the different polynomial functions in a",
"# matrix S. These overlaps defined through the dot product over the",
"# radial coordinate are analytically calculable: Integrate[(rc - r)^(a",
"# + 2) (rc - r)^(b + 2) r^2, {r, 0, rc}]. Then the weights B that make",
"# the basis orthonormal are given by B=S^{-1/2}",
"S",
"=",
"np",
".",
"zeros",
"(",
"(",
"nMax",
",",
"nMax",
")",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"nMax",
"+",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"nMax",
"+",
"1",
")",
":",
"S",
"[",
"i",
"-",
"1",
",",
"j",
"-",
"1",
"]",
"=",
"(",
"2",
"*",
"(",
"rCut",
")",
"**",
"(",
"7",
"+",
"i",
"+",
"j",
")",
")",
"/",
"(",
"(",
"5",
"+",
"i",
"+",
"j",
")",
"*",
"(",
"6",
"+",
"i",
"+",
"j",
")",
"*",
"(",
"7",
"+",
"i",
"+",
"j",
")",
")",
"betas",
"=",
"sqrtm",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"S",
")",
")",
"# If the result is complex, the calculation is currently halted.",
"if",
"(",
"betas",
".",
"dtype",
"==",
"np",
".",
"complex128",
")",
":",
"raise",
"ValueError",
"(",
"\"Could not calculate normalization factors for the polynomial basis\"",
"\" in the domain of real numbers. Lowering the number of radial \"",
"\"basis functions is advised.\"",
")",
"fs",
"=",
"np",
".",
"zeros",
"(",
"[",
"nMax",
",",
"len",
"(",
"x",
")",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"nMax",
"+",
"1",
")",
":",
"fs",
"[",
"n",
"-",
"1",
",",
":",
"]",
"=",
"(",
"rCut",
"-",
"np",
".",
"clip",
"(",
"rx",
",",
"0",
",",
"rCut",
")",
")",
"**",
"(",
"n",
"+",
"2",
")",
"gss",
"=",
"np",
".",
"dot",
"(",
"betas",
",",
"fs",
")",
"return",
"nMax",
",",
"rx",
",",
"gss"
] |
Used to calculate discrete vectors for the polynomial basis functions.
Args:
rCut(float): Radial cutoff
nMax(int): Number of polynomial radial functions
|
[
"Used",
"to",
"calculate",
"discrete",
"vectors",
"for",
"the",
"polynomial",
"basis",
"functions",
"."
] |
80e27cc8d5b4c887011542c5a799583bfc6ff643
|
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/getBasis.py#L303-L342
|
train
|
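The normalization step in getPoly can be checked numerically: with S[i, j] = 2*rCut**(7+i+j) / ((5+i+j)*(6+i+j)*(7+i+j)) and B = S**(-1/2), the product B S B^T should come out as the identity. A short NumPy/SciPy sketch; rCut and nMax are arbitrary test values:

    import numpy as np
    from scipy.linalg import sqrtm

    rCut, nMax = 3.0, 3                    # arbitrary test values
    n = np.arange(1, nMax + 1)
    I, J = np.meshgrid(n, n, indexing="ij")
    S = 2 * rCut**(7 + I + J) / ((5 + I + J) * (6 + I + J) * (7 + I + J))

    B = sqrtm(np.linalg.inv(S))            # orthonormalization weights
    err = np.abs(B @ S @ B.T - np.eye(nMax)).max()
    print(err)                             # tiny: B S B^T is the identity up to round-off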
SINGROUP/SOAPLite
|
soaplite/core.py
|
_format_ase2clusgeo
|
def _format_ase2clusgeo(obj, all_atomtypes=None):
""" Takes an ase Atoms object and returns numpy arrays and integers
which are read by the internal clusgeo. Apos is currently a flattened
out numpy array
Args:
obj():
all_atomtypes():
sort():
"""
#atoms metadata
totalAN = len(obj)
if all_atomtypes is not None:
atomtype_set = set(all_atomtypes)
else:
atomtype_set = set(obj.get_atomic_numbers())
atomtype_lst = np.sort(list(atomtype_set))
n_atoms_per_type_lst = []
pos_lst = []
for atomtype in atomtype_lst:
condition = obj.get_atomic_numbers() == atomtype
pos_onetype = obj.get_positions()[condition]
n_onetype = pos_onetype.shape[0]
# store data in lists
pos_lst.append(pos_onetype)
n_atoms_per_type_lst.append(n_onetype)
typeNs = n_atoms_per_type_lst
Ntypes = len(n_atoms_per_type_lst)
atomtype_lst
Apos = np.concatenate(pos_lst).ravel()
return Apos, typeNs, Ntypes, atomtype_lst, totalAN
|
python
|
def _format_ase2clusgeo(obj, all_atomtypes=None):
""" Takes an ase Atoms object and returns numpy arrays and integers
which are read by the internal clusgeo. Apos is currently a flattened
out numpy array
Args:
obj():
all_atomtypes():
sort():
"""
#atoms metadata
totalAN = len(obj)
if all_atomtypes is not None:
atomtype_set = set(all_atomtypes)
else:
atomtype_set = set(obj.get_atomic_numbers())
atomtype_lst = np.sort(list(atomtype_set))
n_atoms_per_type_lst = []
pos_lst = []
for atomtype in atomtype_lst:
condition = obj.get_atomic_numbers() == atomtype
pos_onetype = obj.get_positions()[condition]
n_onetype = pos_onetype.shape[0]
# store data in lists
pos_lst.append(pos_onetype)
n_atoms_per_type_lst.append(n_onetype)
typeNs = n_atoms_per_type_lst
Ntypes = len(n_atoms_per_type_lst)
atomtype_lst
Apos = np.concatenate(pos_lst).ravel()
return Apos, typeNs, Ntypes, atomtype_lst, totalAN
|
[
"def",
"_format_ase2clusgeo",
"(",
"obj",
",",
"all_atomtypes",
"=",
"None",
")",
":",
"#atoms metadata",
"totalAN",
"=",
"len",
"(",
"obj",
")",
"if",
"all_atomtypes",
"is",
"not",
"None",
":",
"atomtype_set",
"=",
"set",
"(",
"all_atomtypes",
")",
"else",
":",
"atomtype_set",
"=",
"set",
"(",
"obj",
".",
"get_atomic_numbers",
"(",
")",
")",
"atomtype_lst",
"=",
"np",
".",
"sort",
"(",
"list",
"(",
"atomtype_set",
")",
")",
"n_atoms_per_type_lst",
"=",
"[",
"]",
"pos_lst",
"=",
"[",
"]",
"for",
"atomtype",
"in",
"atomtype_lst",
":",
"condition",
"=",
"obj",
".",
"get_atomic_numbers",
"(",
")",
"==",
"atomtype",
"pos_onetype",
"=",
"obj",
".",
"get_positions",
"(",
")",
"[",
"condition",
"]",
"n_onetype",
"=",
"pos_onetype",
".",
"shape",
"[",
"0",
"]",
"# store data in lists",
"pos_lst",
".",
"append",
"(",
"pos_onetype",
")",
"n_atoms_per_type_lst",
".",
"append",
"(",
"n_onetype",
")",
"typeNs",
"=",
"n_atoms_per_type_lst",
"Ntypes",
"=",
"len",
"(",
"n_atoms_per_type_lst",
")",
"atomtype_lst",
"Apos",
"=",
"np",
".",
"concatenate",
"(",
"pos_lst",
")",
".",
"ravel",
"(",
")",
"return",
"Apos",
",",
"typeNs",
",",
"Ntypes",
",",
"atomtype_lst",
",",
"totalAN"
] |
Takes an ase Atoms object and returns numpy arrays and integers
which are read by the internal clusgeo. Apos is currently a flattened
out numpy array
Args:
obj():
all_atomtypes():
sort():
|
[
"Takes",
"an",
"ase",
"Atoms",
"object",
"and",
"returns",
"numpy",
"arrays",
"and",
"integers",
"which",
"are",
"read",
"by",
"the",
"internal",
"clusgeo",
".",
"Apos",
"is",
"currently",
"a",
"flattened",
"out",
"numpy",
"array"
] |
80e27cc8d5b4c887011542c5a799583bfc6ff643
|
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/core.py#L11-L44
|
train
|
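The per-element grouping in _format_ase2clusgeo is ordinary NumPy boolean masking; the same logic can be run without ASE on made-up atomic numbers and positions:

    import numpy as np

    # Fake 'structure': atomic numbers and 3D positions, illustrative only.
    numbers = np.array([8, 1, 1, 8])
    positions = np.arange(12, dtype=float).reshape(4, 3)

    types = np.sort(list(set(numbers)))            # sorted unique species: [1 8]
    pos_per_type, counts = [], []
    for z in types:
        mask = numbers == z
        pos_per_type.append(positions[mask])
        counts.append(int(mask.sum()))

    Apos = np.concatenate(pos_per_type).ravel()    # positions flattened, grouped by type
    print(types, counts, Apos.shape)               # [1 8] [2, 2] (12,)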
SINGROUP/SOAPLite
|
soaplite/core.py
|
get_soap_structure
|
def get_soap_structure(obj, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for atoms in a finite structure.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given structure.
"""
Hpos = obj.get_positions()
arrsoap = get_soap_locals(obj, Hpos, alp, bet, rCut, nMax, Lmax, crossOver, all_atomtypes=all_atomtypes, eta=eta)
return arrsoap
|
python
|
def get_soap_structure(obj, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for atoms in a finite structure.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given structure.
"""
Hpos = obj.get_positions()
arrsoap = get_soap_locals(obj, Hpos, alp, bet, rCut, nMax, Lmax, crossOver, all_atomtypes=all_atomtypes, eta=eta)
return arrsoap
|
[
"def",
"get_soap_structure",
"(",
"obj",
",",
"alp",
",",
"bet",
",",
"rCut",
"=",
"5.0",
",",
"nMax",
"=",
"5",
",",
"Lmax",
"=",
"5",
",",
"crossOver",
"=",
"True",
",",
"all_atomtypes",
"=",
"None",
",",
"eta",
"=",
"1.0",
")",
":",
"Hpos",
"=",
"obj",
".",
"get_positions",
"(",
")",
"arrsoap",
"=",
"get_soap_locals",
"(",
"obj",
",",
"Hpos",
",",
"alp",
",",
"bet",
",",
"rCut",
",",
"nMax",
",",
"Lmax",
",",
"crossOver",
",",
"all_atomtypes",
"=",
"all_atomtypes",
",",
"eta",
"=",
"eta",
")",
"return",
"arrsoap"
] |
Get the RBF basis SOAP output for atoms in a finite structure.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given structure.
|
[
"Get",
"the",
"RBF",
"basis",
"SOAP",
"output",
"for",
"atoms",
"in",
"a",
"finite",
"structure",
"."
] |
80e27cc8d5b4c887011542c5a799583bfc6ff643
|
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/core.py#L172-L195
|
train
|
SINGROUP/SOAPLite
|
soaplite/core.py
|
get_periodic_soap_locals
|
def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given position in a periodic system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given position.
"""
suce = _get_supercell(obj, rCut)
arrsoap = get_soap_locals(suce, Hpos, alp, bet, rCut, nMax=nMax, Lmax=Lmax, crossOver=crossOver, all_atomtypes=all_atomtypes, eta=eta)
return arrsoap
|
python
|
def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given position in a periodic system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given position.
"""
suce = _get_supercell(obj, rCut)
arrsoap = get_soap_locals(suce, Hpos, alp, bet, rCut, nMax=nMax, Lmax=Lmax, crossOver=crossOver, all_atomtypes=all_atomtypes, eta=eta)
return arrsoap
|
[
"def",
"get_periodic_soap_locals",
"(",
"obj",
",",
"Hpos",
",",
"alp",
",",
"bet",
",",
"rCut",
"=",
"5.0",
",",
"nMax",
"=",
"5",
",",
"Lmax",
"=",
"5",
",",
"crossOver",
"=",
"True",
",",
"all_atomtypes",
"=",
"None",
",",
"eta",
"=",
"1.0",
")",
":",
"suce",
"=",
"_get_supercell",
"(",
"obj",
",",
"rCut",
")",
"arrsoap",
"=",
"get_soap_locals",
"(",
"suce",
",",
"Hpos",
",",
"alp",
",",
"bet",
",",
"rCut",
",",
"nMax",
"=",
"nMax",
",",
"Lmax",
"=",
"Lmax",
",",
"crossOver",
"=",
"crossOver",
",",
"all_atomtypes",
"=",
"all_atomtypes",
",",
"eta",
"=",
"eta",
")",
"return",
"arrsoap"
] |
Get the RBF basis SOAP output for the given position in a periodic system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
alp: Alphas
bet: Betas
rCut: Radial cutoff.
        nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given position.
|
[
"Get",
"the",
"RBF",
"basis",
"SOAP",
"output",
"for",
"the",
"given",
"position",
"in",
"a",
"periodic",
"system",
"."
] |
80e27cc8d5b4c887011542c5a799583bfc6ff643
|
https://github.com/SINGROUP/SOAPLite/blob/80e27cc8d5b4c887011542c5a799583bfc6ff643/soaplite/core.py#L198-L221
|
train
|
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.orbit_gen
|
def orbit_gen(self):
"""
Generator for iterating over each orbit.
"""
if self.norbits == 1:
yield self
else:
for i in range(self.norbits):
yield self[:, i]
|
python
|
def orbit_gen(self):
"""
Generator for iterating over each orbit.
"""
if self.norbits == 1:
yield self
else:
for i in range(self.norbits):
yield self[:, i]
|
[
"def",
"orbit_gen",
"(",
"self",
")",
":",
"if",
"self",
".",
"norbits",
"==",
"1",
":",
"yield",
"self",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"norbits",
")",
":",
"yield",
"self",
"[",
":",
",",
"i",
"]"
] |
Generator for iterating over each orbit.
|
[
"Generator",
"for",
"iterating",
"over",
"each",
"orbit",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L294-L303
|
train
|
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.zmax
|
def zmax(self, return_times=False, func=np.mean,
interp_kwargs=None, minimize_kwargs=None,
approximate=False):
"""
Estimate the maximum ``z`` height of the orbit by identifying local
maxima in the absolute value of the ``z`` position and interpolating
between timesteps near the maxima.
By default, this returns the mean of all local maxima. To get, e.g., the
largest ``z`` excursion, pass in ``func=np.max``. To get all ``z``
maxima, pass in ``func=None``.
Parameters
----------
func : func (optional)
A function to evaluate on all of the identified z maximum times.
return_times : bool (optional)
Also return the times of maximum.
interp_kwargs : dict (optional)
Keyword arguments to be passed to
:class:`scipy.interpolate.InterpolatedUnivariateSpline`.
minimize_kwargs : dict (optional)
Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
approximate : bool (optional)
Compute approximate values by skipping interpolation.
Returns
-------
zs : float, :class:`~numpy.ndarray`
Either a single number or an array of maximum z heights.
times : :class:`~numpy.ndarray` (optional, see ``return_times``)
If ``return_times=True``, also returns an array of the apocenter
times.
"""
if return_times and func is not None:
raise ValueError("Cannot return times if reducing "
"using an input function. Pass `func=None` if "
"you want to return all individual values "
"and times.")
if func is None:
reduce = False
func = lambda x: x
else:
reduce = True
# time must increase
if self.t[-1] < self.t[0]:
self = self[::-1]
vals = []
times = []
for orbit in self.orbit_gen():
v, t = orbit._max_helper(np.abs(orbit.cylindrical.z),
interp_kwargs=interp_kwargs,
minimize_kwargs=minimize_kwargs,
approximate=approximate)
vals.append(func(v))
times.append(t)
return self._max_return_helper(vals, times, return_times, reduce)
|
python
|
def zmax(self, return_times=False, func=np.mean,
interp_kwargs=None, minimize_kwargs=None,
approximate=False):
"""
Estimate the maximum ``z`` height of the orbit by identifying local
maxima in the absolute value of the ``z`` position and interpolating
between timesteps near the maxima.
By default, this returns the mean of all local maxima. To get, e.g., the
largest ``z`` excursion, pass in ``func=np.max``. To get all ``z``
maxima, pass in ``func=None``.
Parameters
----------
func : func (optional)
A function to evaluate on all of the identified z maximum times.
return_times : bool (optional)
Also return the times of maximum.
interp_kwargs : dict (optional)
Keyword arguments to be passed to
:class:`scipy.interpolate.InterpolatedUnivariateSpline`.
minimize_kwargs : dict (optional)
Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
approximate : bool (optional)
Compute approximate values by skipping interpolation.
Returns
-------
zs : float, :class:`~numpy.ndarray`
Either a single number or an array of maximum z heights.
times : :class:`~numpy.ndarray` (optional, see ``return_times``)
If ``return_times=True``, also returns an array of the apocenter
times.
"""
if return_times and func is not None:
raise ValueError("Cannot return times if reducing "
"using an input function. Pass `func=None` if "
"you want to return all individual values "
"and times.")
if func is None:
reduce = False
func = lambda x: x
else:
reduce = True
# time must increase
if self.t[-1] < self.t[0]:
self = self[::-1]
vals = []
times = []
for orbit in self.orbit_gen():
v, t = orbit._max_helper(np.abs(orbit.cylindrical.z),
interp_kwargs=interp_kwargs,
minimize_kwargs=minimize_kwargs,
approximate=approximate)
vals.append(func(v))
times.append(t)
return self._max_return_helper(vals, times, return_times, reduce)
|
[
"def",
"zmax",
"(",
"self",
",",
"return_times",
"=",
"False",
",",
"func",
"=",
"np",
".",
"mean",
",",
"interp_kwargs",
"=",
"None",
",",
"minimize_kwargs",
"=",
"None",
",",
"approximate",
"=",
"False",
")",
":",
"if",
"return_times",
"and",
"func",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot return times if reducing \"",
"\"using an input function. Pass `func=None` if \"",
"\"you want to return all individual values \"",
"\"and times.\"",
")",
"if",
"func",
"is",
"None",
":",
"reduce",
"=",
"False",
"func",
"=",
"lambda",
"x",
":",
"x",
"else",
":",
"reduce",
"=",
"True",
"# time must increase",
"if",
"self",
".",
"t",
"[",
"-",
"1",
"]",
"<",
"self",
".",
"t",
"[",
"0",
"]",
":",
"self",
"=",
"self",
"[",
":",
":",
"-",
"1",
"]",
"vals",
"=",
"[",
"]",
"times",
"=",
"[",
"]",
"for",
"orbit",
"in",
"self",
".",
"orbit_gen",
"(",
")",
":",
"v",
",",
"t",
"=",
"orbit",
".",
"_max_helper",
"(",
"np",
".",
"abs",
"(",
"orbit",
".",
"cylindrical",
".",
"z",
")",
",",
"interp_kwargs",
"=",
"interp_kwargs",
",",
"minimize_kwargs",
"=",
"minimize_kwargs",
",",
"approximate",
"=",
"approximate",
")",
"vals",
".",
"append",
"(",
"func",
"(",
"v",
")",
")",
"times",
".",
"append",
"(",
"t",
")",
"return",
"self",
".",
"_max_return_helper",
"(",
"vals",
",",
"times",
",",
"return_times",
",",
"reduce",
")"
] |
Estimate the maximum ``z`` height of the orbit by identifying local
maxima in the absolute value of the ``z`` position and interpolating
between timesteps near the maxima.
By default, this returns the mean of all local maxima. To get, e.g., the
largest ``z`` excursion, pass in ``func=np.max``. To get all ``z``
maxima, pass in ``func=None``.
Parameters
----------
func : func (optional)
A function to evaluate on all of the identified z maximum times.
return_times : bool (optional)
Also return the times of maximum.
interp_kwargs : dict (optional)
Keyword arguments to be passed to
:class:`scipy.interpolate.InterpolatedUnivariateSpline`.
minimize_kwargs : dict (optional)
Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
approximate : bool (optional)
Compute approximate values by skipping interpolation.
Returns
-------
zs : float, :class:`~numpy.ndarray`
Either a single number or an array of maximum z heights.
times : :class:`~numpy.ndarray` (optional, see ``return_times``)
If ``return_times=True``, also returns an array of the apocenter
times.
|
[
"Estimate",
"the",
"maximum",
"z",
"height",
"of",
"the",
"orbit",
"by",
"identifying",
"local",
"maxima",
"in",
"the",
"absolute",
"value",
"of",
"the",
"z",
"position",
"and",
"interpolating",
"between",
"timesteps",
"near",
"the",
"maxima",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L550-L612
|
train
|
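With approximate=True, zmax reduces to locating local maxima of |z(t)| and applying func to them; the idea (without gala's spline interpolation) fits in a few NumPy lines on a toy damped oscillation:

    import numpy as np

    t = np.linspace(0, 40, 2000)
    z = np.exp(-0.05 * t) * np.sin(t)        # toy z(t), illustrative only

    absz = np.abs(z)
    # interior samples larger than both neighbours are local maxima
    pk = np.where((absz[1:-1] > absz[:-2]) & (absz[1:-1] > absz[2:]))[0] + 1
    print(len(pk))
    print(np.mean(absz[pk]))                 # default reduction, func=np.mean
    print(np.max(absz[pk]))                  # largest excursion, func=np.max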
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.eccentricity
|
def eccentricity(self, **kw):
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
"""
ra = self.apocenter(**kw)
rp = self.pericenter(**kw)
return (ra - rp) / (ra + rp)
|
python
|
def eccentricity(self, **kw):
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
"""
ra = self.apocenter(**kw)
rp = self.pericenter(**kw)
return (ra - rp) / (ra + rp)
|
[
"def",
"eccentricity",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"ra",
"=",
"self",
".",
"apocenter",
"(",
"*",
"*",
"kw",
")",
"rp",
"=",
"self",
".",
"pericenter",
"(",
"*",
"*",
"kw",
")",
"return",
"(",
"ra",
"-",
"rp",
")",
"/",
"(",
"ra",
"+",
"rp",
")"
] |
r"""
Returns the eccentricity computed from the mean apocenter and
mean pericenter.
.. math::
e = \frac{r_{\rm apo} - r_{\rm per}}{r_{\rm apo} + r_{\rm per}}
Parameters
----------
**kw
Any keyword arguments passed to ``apocenter()`` and
``pericenter()``. For example, ``approximate=True``.
Returns
-------
ecc : float
The orbital eccentricity.
|
[
"r",
"Returns",
"the",
"eccentricity",
"computed",
"from",
"the",
"mean",
"apocenter",
"and",
"mean",
"pericenter",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L614-L637
|
train
|
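The eccentricity formula is easy to sanity-check by hand: with an apocenter of 12 and a pericenter of 4 (in any length unit), e = (12 - 4) / (12 + 4) = 0.5. The numbers are illustrative, not from a real orbit:

    r_apo, r_per = 12.0, 4.0                 # e.g. kpc; illustrative values
    ecc = (r_apo - r_per) / (r_apo + r_per)
    print(ecc)                               # 0.5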
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.estimate_period
|
def estimate_period(self, radial=True):
"""
Estimate the period of the orbit. By default, computes the radial
period. If ``radial==False``, this returns period estimates for
each dimension of the orbit.
Parameters
----------
radial : bool (optional)
What period to estimate. If ``True``, estimates the radial
period. If ``False``, estimates period in each dimension, e.g.,
if the orbit is 3D, along x, y, and z.
Returns
-------
T : `~astropy.units.Quantity`
The period or periods.
"""
if self.t is None:
raise ValueError("To compute the period, a time array is needed."
" Specify a time array when creating this object.")
if radial:
r = self.physicsspherical.r.value
if self.norbits == 1:
T = peak_to_peak_period(self.t.value, r)
T = T * self.t.unit
else:
T = [peak_to_peak_period(self.t.value, r[:,n])
for n in range(r.shape[1])]
T = T * self.t.unit
else:
raise NotImplementedError("sorry 'bout that...")
return T
|
python
|
def estimate_period(self, radial=True):
"""
Estimate the period of the orbit. By default, computes the radial
period. If ``radial==False``, this returns period estimates for
each dimension of the orbit.
Parameters
----------
radial : bool (optional)
What period to estimate. If ``True``, estimates the radial
period. If ``False``, estimates period in each dimension, e.g.,
if the orbit is 3D, along x, y, and z.
Returns
-------
T : `~astropy.units.Quantity`
The period or periods.
"""
if self.t is None:
raise ValueError("To compute the period, a time array is needed."
" Specify a time array when creating this object.")
if radial:
r = self.physicsspherical.r.value
if self.norbits == 1:
T = peak_to_peak_period(self.t.value, r)
T = T * self.t.unit
else:
T = [peak_to_peak_period(self.t.value, r[:,n])
for n in range(r.shape[1])]
T = T * self.t.unit
else:
raise NotImplementedError("sorry 'bout that...")
return T
|
[
"def",
"estimate_period",
"(",
"self",
",",
"radial",
"=",
"True",
")",
":",
"if",
"self",
".",
"t",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"To compute the period, a time array is needed.\"",
"\" Specify a time array when creating this object.\"",
")",
"if",
"radial",
":",
"r",
"=",
"self",
".",
"physicsspherical",
".",
"r",
".",
"value",
"if",
"self",
".",
"norbits",
"==",
"1",
":",
"T",
"=",
"peak_to_peak_period",
"(",
"self",
".",
"t",
".",
"value",
",",
"r",
")",
"T",
"=",
"T",
"*",
"self",
".",
"t",
".",
"unit",
"else",
":",
"T",
"=",
"[",
"peak_to_peak_period",
"(",
"self",
".",
"t",
".",
"value",
",",
"r",
"[",
":",
",",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"r",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"T",
"=",
"T",
"*",
"self",
".",
"t",
".",
"unit",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"sorry 'bout that...\"",
")",
"return",
"T"
] |
Estimate the period of the orbit. By default, computes the radial
period. If ``radial==False``, this returns period estimates for
each dimension of the orbit.
Parameters
----------
radial : bool (optional)
What period to estimate. If ``True``, estimates the radial
period. If ``False``, estimates period in each dimension, e.g.,
if the orbit is 3D, along x, y, and z.
Returns
-------
T : `~astropy.units.Quantity`
The period or periods.
|
[
"Estimate",
"the",
"period",
"of",
"the",
"orbit",
".",
"By",
"default",
"computes",
"the",
"radial",
"period",
".",
"If",
"radial",
"==",
"False",
"this",
"returns",
"period",
"estimates",
"for",
"each",
"dimension",
"of",
"the",
"orbit",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L639-L675
|
train
|
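estimate_period leans on a peak-to-peak measurement of r(t); the same idea can be reproduced with NumPy alone by averaging the spacing between successive maxima of a periodic signal. This sketch uses a synthetic r(t) with a known 10-unit period and is not gala's peak_to_peak_period helper:

    import numpy as np

    t = np.linspace(0, 100, 5000)
    r = 8.0 + 2.0 * np.cos(2 * np.pi * t / 10.0)   # synthetic radius, period 10

    # interior local maxima of r(t)
    pk = np.where((r[1:-1] > r[:-2]) & (r[1:-1] >= r[2:]))[0] + 1
    period = np.mean(np.diff(t[pk]))
    print(round(period, 2))                        # ~10.0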
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.circulation
|
def circulation(self):
"""
Determine which axes the Orbit circulates around by checking
whether there is a change of sign of the angular momentum
about an axis. Returns a 2D array with ``ndim`` integers per orbit
point. If a box orbit, all integers will be 0. A 1 indicates
circulation about the corresponding axis.
TODO: clockwise / counterclockwise?
For example, for a single 3D orbit:
- Box and boxlet = [0,0,0]
- z-axis (short-axis) tube = [0,0,1]
- x-axis (long-axis) tube = [1,0,0]
Returns
-------
circulation : :class:`numpy.ndarray`
An array that specifies whether there is circulation about any of
the axes of the input orbit. For a single orbit, will return a
1D array, but for multiple orbits, the shape will be
``(3, norbits)``.
"""
L = self.angular_momentum()
# if only 2D, add another empty axis
if L.ndim == 2:
single_orbit = True
L = L[...,None]
else:
single_orbit = False
ndim,ntimes,norbits = L.shape
# initial angular momentum
L0 = L[:,0]
# see if at any timestep the sign has changed
circ = np.ones((ndim,norbits))
for ii in range(ndim):
cnd = (np.sign(L0[ii]) != np.sign(L[ii,1:])) | \
(np.abs(L[ii,1:]).value < 1E-13)
ix = np.atleast_1d(np.any(cnd, axis=0))
circ[ii,ix] = 0
circ = circ.astype(int)
if single_orbit:
return circ.reshape((ndim,))
else:
return circ
|
python
|
def circulation(self):
"""
Determine which axes the Orbit circulates around by checking
whether there is a change of sign of the angular momentum
about an axis. Returns a 2D array with ``ndim`` integers per orbit
point. If a box orbit, all integers will be 0. A 1 indicates
circulation about the corresponding axis.
TODO: clockwise / counterclockwise?
For example, for a single 3D orbit:
- Box and boxlet = [0,0,0]
- z-axis (short-axis) tube = [0,0,1]
- x-axis (long-axis) tube = [1,0,0]
Returns
-------
circulation : :class:`numpy.ndarray`
An array that specifies whether there is circulation about any of
the axes of the input orbit. For a single orbit, will return a
1D array, but for multiple orbits, the shape will be
``(3, norbits)``.
"""
L = self.angular_momentum()
# if only 2D, add another empty axis
if L.ndim == 2:
single_orbit = True
L = L[...,None]
else:
single_orbit = False
ndim,ntimes,norbits = L.shape
# initial angular momentum
L0 = L[:,0]
# see if at any timestep the sign has changed
circ = np.ones((ndim,norbits))
for ii in range(ndim):
cnd = (np.sign(L0[ii]) != np.sign(L[ii,1:])) | \
(np.abs(L[ii,1:]).value < 1E-13)
ix = np.atleast_1d(np.any(cnd, axis=0))
circ[ii,ix] = 0
circ = circ.astype(int)
if single_orbit:
return circ.reshape((ndim,))
else:
return circ
|
[
"def",
"circulation",
"(",
"self",
")",
":",
"L",
"=",
"self",
".",
"angular_momentum",
"(",
")",
"# if only 2D, add another empty axis",
"if",
"L",
".",
"ndim",
"==",
"2",
":",
"single_orbit",
"=",
"True",
"L",
"=",
"L",
"[",
"...",
",",
"None",
"]",
"else",
":",
"single_orbit",
"=",
"False",
"ndim",
",",
"ntimes",
",",
"norbits",
"=",
"L",
".",
"shape",
"# initial angular momentum",
"L0",
"=",
"L",
"[",
":",
",",
"0",
"]",
"# see if at any timestep the sign has changed",
"circ",
"=",
"np",
".",
"ones",
"(",
"(",
"ndim",
",",
"norbits",
")",
")",
"for",
"ii",
"in",
"range",
"(",
"ndim",
")",
":",
"cnd",
"=",
"(",
"np",
".",
"sign",
"(",
"L0",
"[",
"ii",
"]",
")",
"!=",
"np",
".",
"sign",
"(",
"L",
"[",
"ii",
",",
"1",
":",
"]",
")",
")",
"|",
"(",
"np",
".",
"abs",
"(",
"L",
"[",
"ii",
",",
"1",
":",
"]",
")",
".",
"value",
"<",
"1E-13",
")",
"ix",
"=",
"np",
".",
"atleast_1d",
"(",
"np",
".",
"any",
"(",
"cnd",
",",
"axis",
"=",
"0",
")",
")",
"circ",
"[",
"ii",
",",
"ix",
"]",
"=",
"0",
"circ",
"=",
"circ",
".",
"astype",
"(",
"int",
")",
"if",
"single_orbit",
":",
"return",
"circ",
".",
"reshape",
"(",
"(",
"ndim",
",",
")",
")",
"else",
":",
"return",
"circ"
] |
Determine which axes the Orbit circulates around by checking
whether there is a change of sign of the angular momentum
about an axis. Returns a 2D array with ``ndim`` integers per orbit
point. If a box orbit, all integers will be 0. A 1 indicates
circulation about the corresponding axis.
TODO: clockwise / counterclockwise?
For example, for a single 3D orbit:
- Box and boxlet = [0,0,0]
- z-axis (short-axis) tube = [0,0,1]
- x-axis (long-axis) tube = [1,0,0]
Returns
-------
circulation : :class:`numpy.ndarray`
An array that specifies whether there is circulation about any of
the axes of the input orbit. For a single orbit, will return a
1D array, but for multiple orbits, the shape will be
``(3, norbits)``.
|
[
"Determine",
"which",
"axes",
"the",
"Orbit",
"circulates",
"around",
"by",
"checking",
"whether",
"there",
"is",
"a",
"change",
"of",
"sign",
"of",
"the",
"angular",
"momentum",
"about",
"an",
"axis",
".",
"Returns",
"a",
"2D",
"array",
"with",
"ndim",
"integers",
"per",
"orbit",
"point",
".",
"If",
"a",
"box",
"orbit",
"all",
"integers",
"will",
"be",
"0",
".",
"A",
"1",
"indicates",
"circulation",
"about",
"the",
"corresponding",
"axis",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L680-L731
|
train
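
An illustrative sketch of ``circulation`` (not part of the record). A loop orbit confined to the x-y plane of an axisymmetric disk potential should come back as a z-axis tube, i.e. ``[0, 0, 1]``; the specific potential and initial conditions below are assumptions.

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic

# assumed setup: a planar loop orbit in a Miyamoto-Nagai disk
pot = gp.MiyamotoNagaiPotential(m=6e10 * u.Msun, a=3. * u.kpc, b=0.28 * u.kpc,
                                units=galactic)
w0 = gd.PhaseSpacePosition(pos=[8., 0, 0] * u.kpc,
                           vel=[0, 190, 0] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=1. * u.Myr, n_steps=10000)

circ = orbit.circulation()  # expected array([0, 0, 1]): a z-axis tube orbit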
|
adrn/gala
|
gala/dynamics/orbit.py
|
Orbit.align_circulation_with_z
|
def align_circulation_with_z(self, circulation=None):
"""
If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
the z axis.
"""
if circulation is None:
circulation = self.circulation()
circulation = atleast_2d(circulation, insert_axis=1)
cart = self.cartesian
pos = cart.xyz
vel = np.vstack((cart.v_x.value[None],
cart.v_y.value[None],
cart.v_z.value[None])) * cart.v_x.unit
if pos.ndim < 3:
pos = pos[...,np.newaxis]
vel = vel[...,np.newaxis]
if (circulation.shape[0] != self.ndim or
circulation.shape[1] != pos.shape[2]):
raise ValueError("Shape of 'circulation' array should match the "
"shape of the position/velocity (minus the time "
"axis).")
new_pos = pos.copy()
new_vel = vel.copy()
for n in range(pos.shape[2]):
if circulation[2,n] == 1 or np.all(circulation[:,n] == 0):
# already circulating about z or box orbit
continue
if sum(circulation[:,n]) > 1:
logger.warning("Circulation about multiple axes - are you sure "
"the orbit has been integrated for long enough?")
if circulation[0,n] == 1:
circ = 0
elif circulation[1,n] == 1:
circ = 1
else:
raise RuntimeError("Should never get here...")
new_pos[circ,:,n] = pos[2,:,n]
new_pos[2,:,n] = pos[circ,:,n]
new_vel[circ,:,n] = vel[2,:,n]
new_vel[2,:,n] = vel[circ,:,n]
return self.__class__(pos=new_pos.reshape(cart.xyz.shape),
vel=new_vel.reshape(cart.xyz.shape),
t=self.t,
hamiltonian=self.hamiltonian)
|
python
|
def align_circulation_with_z(self, circulation=None):
"""
If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
the z axis.
"""
if circulation is None:
circulation = self.circulation()
circulation = atleast_2d(circulation, insert_axis=1)
cart = self.cartesian
pos = cart.xyz
vel = np.vstack((cart.v_x.value[None],
cart.v_y.value[None],
cart.v_z.value[None])) * cart.v_x.unit
if pos.ndim < 3:
pos = pos[...,np.newaxis]
vel = vel[...,np.newaxis]
if (circulation.shape[0] != self.ndim or
circulation.shape[1] != pos.shape[2]):
raise ValueError("Shape of 'circulation' array should match the "
"shape of the position/velocity (minus the time "
"axis).")
new_pos = pos.copy()
new_vel = vel.copy()
for n in range(pos.shape[2]):
if circulation[2,n] == 1 or np.all(circulation[:,n] == 0):
# already circulating about z or box orbit
continue
if sum(circulation[:,n]) > 1:
logger.warning("Circulation about multiple axes - are you sure "
"the orbit has been integrated for long enough?")
if circulation[0,n] == 1:
circ = 0
elif circulation[1,n] == 1:
circ = 1
else:
raise RuntimeError("Should never get here...")
new_pos[circ,:,n] = pos[2,:,n]
new_pos[2,:,n] = pos[circ,:,n]
new_vel[circ,:,n] = vel[2,:,n]
new_vel[2,:,n] = vel[circ,:,n]
return self.__class__(pos=new_pos.reshape(cart.xyz.shape),
vel=new_vel.reshape(cart.xyz.shape),
t=self.t,
hamiltonian=self.hamiltonian)
|
[
"def",
"align_circulation_with_z",
"(",
"self",
",",
"circulation",
"=",
"None",
")",
":",
"if",
"circulation",
"is",
"None",
":",
"circulation",
"=",
"self",
".",
"circulation",
"(",
")",
"circulation",
"=",
"atleast_2d",
"(",
"circulation",
",",
"insert_axis",
"=",
"1",
")",
"cart",
"=",
"self",
".",
"cartesian",
"pos",
"=",
"cart",
".",
"xyz",
"vel",
"=",
"np",
".",
"vstack",
"(",
"(",
"cart",
".",
"v_x",
".",
"value",
"[",
"None",
"]",
",",
"cart",
".",
"v_y",
".",
"value",
"[",
"None",
"]",
",",
"cart",
".",
"v_z",
".",
"value",
"[",
"None",
"]",
")",
")",
"*",
"cart",
".",
"v_x",
".",
"unit",
"if",
"pos",
".",
"ndim",
"<",
"3",
":",
"pos",
"=",
"pos",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"vel",
"=",
"vel",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"if",
"(",
"circulation",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"ndim",
"or",
"circulation",
".",
"shape",
"[",
"1",
"]",
"!=",
"pos",
".",
"shape",
"[",
"2",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Shape of 'circulation' array should match the \"",
"\"shape of the position/velocity (minus the time \"",
"\"axis).\"",
")",
"new_pos",
"=",
"pos",
".",
"copy",
"(",
")",
"new_vel",
"=",
"vel",
".",
"copy",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"pos",
".",
"shape",
"[",
"2",
"]",
")",
":",
"if",
"circulation",
"[",
"2",
",",
"n",
"]",
"==",
"1",
"or",
"np",
".",
"all",
"(",
"circulation",
"[",
":",
",",
"n",
"]",
"==",
"0",
")",
":",
"# already circulating about z or box orbit",
"continue",
"if",
"sum",
"(",
"circulation",
"[",
":",
",",
"n",
"]",
")",
">",
"1",
":",
"logger",
".",
"warning",
"(",
"\"Circulation about multiple axes - are you sure \"",
"\"the orbit has been integrated for long enough?\"",
")",
"if",
"circulation",
"[",
"0",
",",
"n",
"]",
"==",
"1",
":",
"circ",
"=",
"0",
"elif",
"circulation",
"[",
"1",
",",
"n",
"]",
"==",
"1",
":",
"circ",
"=",
"1",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Should never get here...\"",
")",
"new_pos",
"[",
"circ",
",",
":",
",",
"n",
"]",
"=",
"pos",
"[",
"2",
",",
":",
",",
"n",
"]",
"new_pos",
"[",
"2",
",",
":",
",",
"n",
"]",
"=",
"pos",
"[",
"circ",
",",
":",
",",
"n",
"]",
"new_vel",
"[",
"circ",
",",
":",
",",
"n",
"]",
"=",
"vel",
"[",
"2",
",",
":",
",",
"n",
"]",
"new_vel",
"[",
"2",
",",
":",
",",
"n",
"]",
"=",
"vel",
"[",
"circ",
",",
":",
",",
"n",
"]",
"return",
"self",
".",
"__class__",
"(",
"pos",
"=",
"new_pos",
".",
"reshape",
"(",
"cart",
".",
"xyz",
".",
"shape",
")",
",",
"vel",
"=",
"new_vel",
".",
"reshape",
"(",
"cart",
".",
"xyz",
".",
"shape",
")",
",",
"t",
"=",
"self",
".",
"t",
",",
"hamiltonian",
"=",
"self",
".",
"hamiltonian",
")"
] |
If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
the z axis.
|
[
"If",
"the",
"input",
"orbit",
"is",
"a",
"tube",
"orbit",
"this",
"function",
"aligns",
"the",
"circulation",
"axis",
"with",
"the",
"z",
"axis",
"and",
"returns",
"a",
"copy",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/orbit.py#L733-L800
|
train
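
A hedged sketch of the ``align_circulation_with_z`` call pattern (illustrative; the flattened logarithmic potential and initial conditions are assumptions). For a z-axis tube the call is effectively a copy, while long-axis tubes get their x and z components swapped.

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic

# assumed setup: a triaxial logarithmic potential can host long-axis tubes
pot = gp.LogarithmicPotential(v_c=230 * u.km / u.s, r_h=10. * u.kpc,
                              q1=1., q2=0.9, q3=0.8, units=galactic)
w0 = gd.PhaseSpacePosition(pos=[8., 0, 2.] * u.kpc,
                           vel=[0, 180, 20] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=1. * u.Myr, n_steps=20000)

circ = orbit.circulation()
aligned = orbit.align_circulation_with_z(circulation=circ)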
|
adrn/gala
|
gala/coordinates/greatcircle.py
|
greatcircle_to_greatcircle
|
def greatcircle_to_greatcircle(from_greatcircle_coord,
to_greatcircle_frame):
"""Transform between two greatcircle frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
intermediate_from = from_greatcircle_coord.transform_to(
from_greatcircle_coord.pole)
intermediate_to = intermediate_from.transform_to(
to_greatcircle_frame.pole)
return intermediate_to.transform_to(to_greatcircle_frame)
|
python
|
def greatcircle_to_greatcircle(from_greatcircle_coord,
to_greatcircle_frame):
"""Transform between two greatcircle frames."""
# This transform goes through the parent frames on each side.
# from_frame -> from_frame.origin -> to_frame.origin -> to_frame
intermediate_from = from_greatcircle_coord.transform_to(
from_greatcircle_coord.pole)
intermediate_to = intermediate_from.transform_to(
to_greatcircle_frame.pole)
return intermediate_to.transform_to(to_greatcircle_frame)
|
[
"def",
"greatcircle_to_greatcircle",
"(",
"from_greatcircle_coord",
",",
"to_greatcircle_frame",
")",
":",
"# This transform goes through the parent frames on each side.",
"# from_frame -> from_frame.origin -> to_frame.origin -> to_frame",
"intermediate_from",
"=",
"from_greatcircle_coord",
".",
"transform_to",
"(",
"from_greatcircle_coord",
".",
"pole",
")",
"intermediate_to",
"=",
"intermediate_from",
".",
"transform_to",
"(",
"to_greatcircle_frame",
".",
"pole",
")",
"return",
"intermediate_to",
".",
"transform_to",
"(",
"to_greatcircle_frame",
")"
] |
Transform between two greatcircle frames.
|
[
"Transform",
"between",
"two",
"greatcircle",
"frames",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L21-L31
|
train
|
adrn/gala
|
gala/coordinates/greatcircle.py
|
reference_to_greatcircle
|
def reference_to_greatcircle(reference_frame, greatcircle_frame):
"""Convert a reference coordinate to a great circle frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
pole = greatcircle_frame.pole.transform_to(coord.ICRS)
ra0 = greatcircle_frame.ra0
center = greatcircle_frame.center
R_rot = rotation_matrix(greatcircle_frame.rotation, 'z')
if not np.isnan(ra0):
xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.])
zaxis = pole.cartesian.xyz.value
if np.abs(zaxis[2]) >= 1e-15:
xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2] # what?
else:
xaxis[2] = 0.
xaxis = xaxis / np.sqrt(np.sum(xaxis**2))
yaxis = np.cross(zaxis, xaxis)
R = np.stack((xaxis, yaxis, zaxis))
elif center is not None:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(90*u.deg - pole.dec, 'y')
Rtmp = matrix_product(R2, R1)
rot = center.cartesian.transform(Rtmp)
rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon
R3 = rotation_matrix(rot_lon, 'z')
R = matrix_product(R3, R2, R1)
else:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(pole.dec, 'y')
R = matrix_product(R2, R1)
return matrix_product(R_rot, R)
|
python
|
def reference_to_greatcircle(reference_frame, greatcircle_frame):
"""Convert a reference coordinate to a great circle frame."""
# Define rotation matrices along the position angle vector, and
# relative to the origin.
pole = greatcircle_frame.pole.transform_to(coord.ICRS)
ra0 = greatcircle_frame.ra0
center = greatcircle_frame.center
R_rot = rotation_matrix(greatcircle_frame.rotation, 'z')
if not np.isnan(ra0):
xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.])
zaxis = pole.cartesian.xyz.value
if np.abs(zaxis[2]) >= 1e-15:
xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2] # what?
else:
xaxis[2] = 0.
xaxis = xaxis / np.sqrt(np.sum(xaxis**2))
yaxis = np.cross(zaxis, xaxis)
R = np.stack((xaxis, yaxis, zaxis))
elif center is not None:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(90*u.deg - pole.dec, 'y')
Rtmp = matrix_product(R2, R1)
rot = center.cartesian.transform(Rtmp)
rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon
R3 = rotation_matrix(rot_lon, 'z')
R = matrix_product(R3, R2, R1)
else:
R1 = rotation_matrix(pole.ra, 'z')
R2 = rotation_matrix(pole.dec, 'y')
R = matrix_product(R2, R1)
return matrix_product(R_rot, R)
|
[
"def",
"reference_to_greatcircle",
"(",
"reference_frame",
",",
"greatcircle_frame",
")",
":",
"# Define rotation matrices along the position angle vector, and",
"# relative to the origin.",
"pole",
"=",
"greatcircle_frame",
".",
"pole",
".",
"transform_to",
"(",
"coord",
".",
"ICRS",
")",
"ra0",
"=",
"greatcircle_frame",
".",
"ra0",
"center",
"=",
"greatcircle_frame",
".",
"center",
"R_rot",
"=",
"rotation_matrix",
"(",
"greatcircle_frame",
".",
"rotation",
",",
"'z'",
")",
"if",
"not",
"np",
".",
"isnan",
"(",
"ra0",
")",
":",
"xaxis",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"cos",
"(",
"ra0",
")",
",",
"np",
".",
"sin",
"(",
"ra0",
")",
",",
"0.",
"]",
")",
"zaxis",
"=",
"pole",
".",
"cartesian",
".",
"xyz",
".",
"value",
"if",
"np",
".",
"abs",
"(",
"zaxis",
"[",
"2",
"]",
")",
">=",
"1e-15",
":",
"xaxis",
"[",
"2",
"]",
"=",
"-",
"(",
"zaxis",
"[",
"0",
"]",
"*",
"xaxis",
"[",
"0",
"]",
"+",
"zaxis",
"[",
"1",
"]",
"*",
"xaxis",
"[",
"1",
"]",
")",
"/",
"zaxis",
"[",
"2",
"]",
"# what?",
"else",
":",
"xaxis",
"[",
"2",
"]",
"=",
"0.",
"xaxis",
"=",
"xaxis",
"/",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"xaxis",
"**",
"2",
")",
")",
"yaxis",
"=",
"np",
".",
"cross",
"(",
"zaxis",
",",
"xaxis",
")",
"R",
"=",
"np",
".",
"stack",
"(",
"(",
"xaxis",
",",
"yaxis",
",",
"zaxis",
")",
")",
"elif",
"center",
"is",
"not",
"None",
":",
"R1",
"=",
"rotation_matrix",
"(",
"pole",
".",
"ra",
",",
"'z'",
")",
"R2",
"=",
"rotation_matrix",
"(",
"90",
"*",
"u",
".",
"deg",
"-",
"pole",
".",
"dec",
",",
"'y'",
")",
"Rtmp",
"=",
"matrix_product",
"(",
"R2",
",",
"R1",
")",
"rot",
"=",
"center",
".",
"cartesian",
".",
"transform",
"(",
"Rtmp",
")",
"rot_lon",
"=",
"rot",
".",
"represent_as",
"(",
"coord",
".",
"UnitSphericalRepresentation",
")",
".",
"lon",
"R3",
"=",
"rotation_matrix",
"(",
"rot_lon",
",",
"'z'",
")",
"R",
"=",
"matrix_product",
"(",
"R3",
",",
"R2",
",",
"R1",
")",
"else",
":",
"R1",
"=",
"rotation_matrix",
"(",
"pole",
".",
"ra",
",",
"'z'",
")",
"R2",
"=",
"rotation_matrix",
"(",
"pole",
".",
"dec",
",",
"'y'",
")",
"R",
"=",
"matrix_product",
"(",
"R2",
",",
"R1",
")",
"return",
"matrix_product",
"(",
"R_rot",
",",
"R",
")"
] |
Convert a reference coordinate to a great circle frame.
|
[
"Convert",
"a",
"reference",
"coordinate",
"to",
"a",
"great",
"circle",
"frame",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L34-L70
|
train
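
This rotation-matrix builder is registered in astropy's transform graph, so in practice it is exercised by transforming coordinates into a ``GreatCircleICRSFrame``. A hedged sketch; the pole and coordinate values below are arbitrary assumptions.

import astropy.coordinates as coord
import astropy.units as u
import gala.coordinates as gc

pole = coord.SkyCoord(ra=150 * u.deg, dec=30 * u.deg)   # assumed pole
fr = gc.GreatCircleICRSFrame(pole=pole, ra0=160 * u.deg)

c = coord.SkyCoord(ra=160 * u.deg, dec=-11 * u.deg)
c_fr = c.transform_to(fr)   # reference_to_greatcircle supplies the rotation
# c_fr.phi1, c_fr.phi2: longitude / latitude in the great-circle frame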
|
adrn/gala
|
gala/coordinates/greatcircle.py
|
pole_from_endpoints
|
def pole_from_endpoints(coord1, coord2):
"""Compute the pole from a great circle that connects the two specified
coordinates.
This assumes a right-handed rule from coord1 to coord2: the pole is the
north pole under that assumption.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
pole : `~astropy.coordinates.SkyCoord`
The coordinates of the pole.
"""
c1 = coord1.cartesian / coord1.cartesian.norm()
coord2 = coord2.transform_to(coord1.frame)
c2 = coord2.cartesian / coord2.cartesian.norm()
pole = c1.cross(c2)
pole = pole / pole.norm()
return coord1.frame.realize_frame(pole)
|
python
|
def pole_from_endpoints(coord1, coord2):
"""Compute the pole from a great circle that connects the two specified
coordinates.
This assumes a right-handed rule from coord1 to coord2: the pole is the
north pole under that assumption.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
pole : `~astropy.coordinates.SkyCoord`
The coordinates of the pole.
"""
c1 = coord1.cartesian / coord1.cartesian.norm()
coord2 = coord2.transform_to(coord1.frame)
c2 = coord2.cartesian / coord2.cartesian.norm()
pole = c1.cross(c2)
pole = pole / pole.norm()
return coord1.frame.realize_frame(pole)
|
[
"def",
"pole_from_endpoints",
"(",
"coord1",
",",
"coord2",
")",
":",
"c1",
"=",
"coord1",
".",
"cartesian",
"/",
"coord1",
".",
"cartesian",
".",
"norm",
"(",
")",
"coord2",
"=",
"coord2",
".",
"transform_to",
"(",
"coord1",
".",
"frame",
")",
"c2",
"=",
"coord2",
".",
"cartesian",
"/",
"coord2",
".",
"cartesian",
".",
"norm",
"(",
")",
"pole",
"=",
"c1",
".",
"cross",
"(",
"c2",
")",
"pole",
"=",
"pole",
"/",
"pole",
".",
"norm",
"(",
")",
"return",
"coord1",
".",
"frame",
".",
"realize_frame",
"(",
"pole",
")"
] |
Compute the pole from a great circle that connects the two specified
coordinates.
This assumes a right-handed rule from coord1 to coord2: the pole is the
north pole under that assumption.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
pole : `~astropy.coordinates.SkyCoord`
The coordinates of the pole.
|
[
"Compute",
"the",
"pole",
"from",
"a",
"great",
"circle",
"that",
"connects",
"the",
"two",
"specified",
"coordinates",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L270-L296
|
train
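
A small sketch of ``pole_from_endpoints`` (illustrative; the import path is an assumption based on gala's public coordinates API). Two points on the celestial equator, taken in right-handed order, give the north celestial pole.

import astropy.coordinates as coord
import astropy.units as u
from gala.coordinates import pole_from_endpoints  # assumed public import

c1 = coord.SkyCoord(ra=0 * u.deg, dec=0 * u.deg)
c2 = coord.SkyCoord(ra=90 * u.deg, dec=0 * u.deg)
pole = pole_from_endpoints(c1, c2)  # ICRS frame realized at dec = +90 deg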
|
adrn/gala
|
gala/coordinates/greatcircle.py
|
sph_midpoint
|
def sph_midpoint(coord1, coord2):
"""Compute the midpoint between two points on the sphere.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
midpt : `~astropy.coordinates.SkyCoord`
The coordinates of the spherical midpoint.
"""
c1 = coord1.cartesian / coord1.cartesian.norm()
coord2 = coord2.transform_to(coord1.frame)
c2 = coord2.cartesian / coord2.cartesian.norm()
midpt = 0.5 * (c1 + c2)
usph = midpt.represent_as(coord.UnitSphericalRepresentation)
return coord1.frame.realize_frame(usph)
|
python
|
def sph_midpoint(coord1, coord2):
"""Compute the midpoint between two points on the sphere.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
midpt : `~astropy.coordinates.SkyCoord`
The coordinates of the spherical midpoint.
"""
c1 = coord1.cartesian / coord1.cartesian.norm()
coord2 = coord2.transform_to(coord1.frame)
c2 = coord2.cartesian / coord2.cartesian.norm()
midpt = 0.5 * (c1 + c2)
usph = midpt.represent_as(coord.UnitSphericalRepresentation)
return coord1.frame.realize_frame(usph)
|
[
"def",
"sph_midpoint",
"(",
"coord1",
",",
"coord2",
")",
":",
"c1",
"=",
"coord1",
".",
"cartesian",
"/",
"coord1",
".",
"cartesian",
".",
"norm",
"(",
")",
"coord2",
"=",
"coord2",
".",
"transform_to",
"(",
"coord1",
".",
"frame",
")",
"c2",
"=",
"coord2",
".",
"cartesian",
"/",
"coord2",
".",
"cartesian",
".",
"norm",
"(",
")",
"midpt",
"=",
"0.5",
"*",
"(",
"c1",
"+",
"c2",
")",
"usph",
"=",
"midpt",
".",
"represent_as",
"(",
"coord",
".",
"UnitSphericalRepresentation",
")",
"return",
"coord1",
".",
"frame",
".",
"realize_frame",
"(",
"usph",
")"
] |
Compute the midpoint between two points on the sphere.
Parameters
----------
coord1 : `~astropy.coordinates.SkyCoord`
Coordinate of one point on a great circle.
coord2 : `~astropy.coordinates.SkyCoord`
Coordinate of the other point on a great circle.
Returns
-------
midpt : `~astropy.coordinates.SkyCoord`
The coordinates of the spherical midpoint.
|
[
"Compute",
"the",
"midpoint",
"between",
"two",
"points",
"on",
"the",
"sphere",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L299-L322
|
train
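
A matching sketch for ``sph_midpoint`` (illustrative; the import path is assumed as above). Two equatorial points straddling ra = 30 deg give a midpoint on the equator.

import astropy.coordinates as coord
import astropy.units as u
from gala.coordinates import sph_midpoint  # assumed public import

c1 = coord.SkyCoord(ra=10 * u.deg, dec=0 * u.deg)
c2 = coord.SkyCoord(ra=50 * u.deg, dec=0 * u.deg)
mid = sph_midpoint(c1, c2)  # ICRS frame realized at ra = 30 deg, dec = 0 deg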
|
adrn/gala
|
gala/coordinates/pm_cov_transform.py
|
get_uv_tan
|
def get_uv_tan(c):
"""Get tangent plane basis vectors on the unit sphere at the given
spherical coordinates.
"""
l = c.spherical.lon
b = c.spherical.lat
p = np.array([-np.sin(l), np.cos(l), np.zeros_like(l.value)]).T
q = np.array([-np.cos(l)*np.sin(b), -np.sin(l)*np.sin(b), np.cos(b)]).T
return np.stack((p, q), axis=-1)
|
python
|
def get_uv_tan(c):
"""Get tangent plane basis vectors on the unit sphere at the given
spherical coordinates.
"""
l = c.spherical.lon
b = c.spherical.lat
p = np.array([-np.sin(l), np.cos(l), np.zeros_like(l.value)]).T
q = np.array([-np.cos(l)*np.sin(b), -np.sin(l)*np.sin(b), np.cos(b)]).T
return np.stack((p, q), axis=-1)
|
[
"def",
"get_uv_tan",
"(",
"c",
")",
":",
"l",
"=",
"c",
".",
"spherical",
".",
"lon",
"b",
"=",
"c",
".",
"spherical",
".",
"lat",
"p",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"np",
".",
"sin",
"(",
"l",
")",
",",
"np",
".",
"cos",
"(",
"l",
")",
",",
"np",
".",
"zeros_like",
"(",
"l",
".",
"value",
")",
"]",
")",
".",
"T",
"q",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"np",
".",
"cos",
"(",
"l",
")",
"*",
"np",
".",
"sin",
"(",
"b",
")",
",",
"-",
"np",
".",
"sin",
"(",
"l",
")",
"*",
"np",
".",
"sin",
"(",
"b",
")",
",",
"np",
".",
"cos",
"(",
"b",
")",
"]",
")",
".",
"T",
"return",
"np",
".",
"stack",
"(",
"(",
"p",
",",
"q",
")",
",",
"axis",
"=",
"-",
"1",
")"
] |
Get tangent plane basis vectors on the unit sphere at the given
spherical coordinates.
|
[
"Get",
"tangent",
"plane",
"basis",
"vectors",
"on",
"the",
"unit",
"sphere",
"at",
"the",
"given",
"spherical",
"coordinates",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/pm_cov_transform.py#L8-L18
|
train
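
A quick check of the tangent-basis helper (illustrative; ``get_uv_tan`` is an internal function, so the import path below is taken from the record's file path and may not be a stable API). At the ICRS origin the longitude unit vector is (0, 1, 0) and the latitude unit vector is (0, 0, 1).

import astropy.coordinates as coord
import astropy.units as u
from gala.coordinates.pm_cov_transform import get_uv_tan  # internal helper

c = coord.SkyCoord(ra=0 * u.deg, dec=0 * u.deg)
uv = get_uv_tan(c)
# uv[..., 0] ~ (0, 1, 0): direction of increasing longitude
# uv[..., 1] ~ (0, 0, 1): direction of increasing latitude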
|
adrn/gala
|
gala/coordinates/pm_cov_transform.py
|
transform_pm_cov
|
def transform_pm_cov(c, cov, to_frame):
"""Transform a proper motion covariance matrix to a new frame.
Parameters
----------
c : `~astropy.coordinates.SkyCoord`
The sky coordinates of the sources in the initial coordinate frame.
cov : array_like
The covariance matrix of the proper motions. Must have same length as
the input coordinates.
to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass
The frame to transform to as an Astropy coordinate frame class or
instance.
Returns
-------
new_cov : array_like
The transformed covariance matrix.
"""
if c.isscalar and cov.shape != (2, 2):
raise ValueError('If input coordinate object is a scalar coordinate, '
'the proper motion covariance matrix must have shape '
'(2, 2), not {}'.format(cov.shape))
elif not c.isscalar and len(c) != cov.shape[0]:
raise ValueError('Input coordinates and covariance matrix must have '
'the same number of entries ({} vs {}).'
.format(len(c), cov.shape[0]))
# 3D rotation matrix, to be projected onto the tangent plane
if hasattr(c, 'frame'):
frame = c.frame
else:
frame = c
R = get_transform_matrix(frame.__class__, to_frame)
# Get input coordinates in the desired frame:
c_to = c.transform_to(to_frame)
# Get tangent plane coordinates:
uv_in = get_uv_tan(c)
uv_to = get_uv_tan(c_to)
if not c.isscalar:
G = np.einsum('nab,nac->nbc', uv_to,
np.einsum('ji,nik->njk', R, uv_in))
# transform
cov_to = np.einsum('nba,nac->nbc', G,
np.einsum('nij,nki->njk', cov, G))
else:
G = np.einsum('ab,ac->bc', uv_to,
np.einsum('ji,ik->jk', R, uv_in))
# transform
cov_to = np.einsum('ba,ac->bc', G,
np.einsum('ij,ki->jk', cov, G))
return cov_to
|
python
|
def transform_pm_cov(c, cov, to_frame):
"""Transform a proper motion covariance matrix to a new frame.
Parameters
----------
c : `~astropy.coordinates.SkyCoord`
The sky coordinates of the sources in the initial coordinate frame.
cov : array_like
The covariance matrix of the proper motions. Must have same length as
the input coordinates.
to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass
The frame to transform to as an Astropy coordinate frame class or
instance.
Returns
-------
new_cov : array_like
The transformed covariance matrix.
"""
if c.isscalar and cov.shape != (2, 2):
raise ValueError('If input coordinate object is a scalar coordinate, '
'the proper motion covariance matrix must have shape '
'(2, 2), not {}'.format(cov.shape))
elif not c.isscalar and len(c) != cov.shape[0]:
raise ValueError('Input coordinates and covariance matrix must have '
'the same number of entries ({} vs {}).'
.format(len(c), cov.shape[0]))
# 3D rotation matrix, to be projected onto the tangent plane
if hasattr(c, 'frame'):
frame = c.frame
else:
frame = c
R = get_transform_matrix(frame.__class__, to_frame)
# Get input coordinates in the desired frame:
c_to = c.transform_to(to_frame)
# Get tangent plane coordinates:
uv_in = get_uv_tan(c)
uv_to = get_uv_tan(c_to)
if not c.isscalar:
G = np.einsum('nab,nac->nbc', uv_to,
np.einsum('ji,nik->njk', R, uv_in))
# transform
cov_to = np.einsum('nba,nac->nbc', G,
np.einsum('nij,nki->njk', cov, G))
else:
G = np.einsum('ab,ac->bc', uv_to,
np.einsum('ji,ik->jk', R, uv_in))
# transform
cov_to = np.einsum('ba,ac->bc', G,
np.einsum('ij,ki->jk', cov, G))
return cov_to
|
[
"def",
"transform_pm_cov",
"(",
"c",
",",
"cov",
",",
"to_frame",
")",
":",
"if",
"c",
".",
"isscalar",
"and",
"cov",
".",
"shape",
"!=",
"(",
"2",
",",
"2",
")",
":",
"raise",
"ValueError",
"(",
"'If input coordinate object is a scalar coordinate, '",
"'the proper motion covariance matrix must have shape '",
"'(2, 2), not {}'",
".",
"format",
"(",
"cov",
".",
"shape",
")",
")",
"elif",
"not",
"c",
".",
"isscalar",
"and",
"len",
"(",
"c",
")",
"!=",
"cov",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Input coordinates and covariance matrix must have '",
"'the same number of entries ({} vs {}).'",
".",
"format",
"(",
"len",
"(",
"c",
")",
",",
"cov",
".",
"shape",
"[",
"0",
"]",
")",
")",
"# 3D rotation matrix, to be projected onto the tangent plane",
"if",
"hasattr",
"(",
"c",
",",
"'frame'",
")",
":",
"frame",
"=",
"c",
".",
"frame",
"else",
":",
"frame",
"=",
"c",
"R",
"=",
"get_transform_matrix",
"(",
"frame",
".",
"__class__",
",",
"to_frame",
")",
"# Get input coordinates in the desired frame:",
"c_to",
"=",
"c",
".",
"transform_to",
"(",
"to_frame",
")",
"# Get tangent plane coordinates:",
"uv_in",
"=",
"get_uv_tan",
"(",
"c",
")",
"uv_to",
"=",
"get_uv_tan",
"(",
"c_to",
")",
"if",
"not",
"c",
".",
"isscalar",
":",
"G",
"=",
"np",
".",
"einsum",
"(",
"'nab,nac->nbc'",
",",
"uv_to",
",",
"np",
".",
"einsum",
"(",
"'ji,nik->njk'",
",",
"R",
",",
"uv_in",
")",
")",
"# transform",
"cov_to",
"=",
"np",
".",
"einsum",
"(",
"'nba,nac->nbc'",
",",
"G",
",",
"np",
".",
"einsum",
"(",
"'nij,nki->njk'",
",",
"cov",
",",
"G",
")",
")",
"else",
":",
"G",
"=",
"np",
".",
"einsum",
"(",
"'ab,ac->bc'",
",",
"uv_to",
",",
"np",
".",
"einsum",
"(",
"'ji,ik->jk'",
",",
"R",
",",
"uv_in",
")",
")",
"# transform",
"cov_to",
"=",
"np",
".",
"einsum",
"(",
"'ba,ac->bc'",
",",
"G",
",",
"np",
".",
"einsum",
"(",
"'ij,ki->jk'",
",",
"cov",
",",
"G",
")",
")",
"return",
"cov_to"
] |
Transform a proper motion covariance matrix to a new frame.
Parameters
----------
c : `~astropy.coordinates.SkyCoord`
The sky coordinates of the sources in the initial coordinate frame.
cov : array_like
The covariance matrix of the proper motions. Must have same length as
the input coordinates.
to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass
The frame to transform to as an Astropy coordinate frame class or
instance.
Returns
-------
new_cov : array_like
The transformed covariance matrix.
|
[
"Transform",
"a",
"proper",
"motion",
"covariance",
"matrix",
"to",
"a",
"new",
"frame",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/pm_cov_transform.py#L62-L121
|
train
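
A hedged usage sketch for ``transform_pm_cov`` (the import path and the covariance values are assumptions): rotating a Gaia-style (pmra*, pmdec) covariance matrix from ICRS into Galactic proper-motion components.

import astropy.coordinates as coord
import astropy.units as u
import numpy as np
from gala.coordinates import transform_pm_cov  # assumed public import

c = coord.SkyCoord(ra=45 * u.deg, dec=20 * u.deg)
cov_icrs = np.array([[0.04, 0.01],     # illustrative 2x2 covariance,
                     [0.01, 0.09]])    # units of mas^2 / yr^2
cov_gal = transform_pm_cov(c, cov_icrs, coord.Galactic)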
|
adrn/gala
|
gala/potential/frame/builtin/transformations.py
|
rodrigues_axis_angle_rotate
|
def rodrigues_axis_angle_rotate(x, vec, theta):
"""
Rotated the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape
"""
x = np.array(x).T
vec = np.array(vec).T
theta = np.array(theta).T[...,None]
out = np.cos(theta)*x + np.sin(theta)*np.cross(vec, x) + \
(1 - np.cos(theta)) * (vec * x).sum(axis=-1)[...,None] * vec
return out.T
|
python
|
def rodrigues_axis_angle_rotate(x, vec, theta):
"""
Rotated the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape
"""
x = np.array(x).T
vec = np.array(vec).T
theta = np.array(theta).T[...,None]
out = np.cos(theta)*x + np.sin(theta)*np.cross(vec, x) + \
(1 - np.cos(theta)) * (vec * x).sum(axis=-1)[...,None] * vec
return out.T
|
[
"def",
"rodrigues_axis_angle_rotate",
"(",
"x",
",",
"vec",
",",
"theta",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
".",
"T",
"vec",
"=",
"np",
".",
"array",
"(",
"vec",
")",
".",
"T",
"theta",
"=",
"np",
".",
"array",
"(",
"theta",
")",
".",
"T",
"[",
"...",
",",
"None",
"]",
"out",
"=",
"np",
".",
"cos",
"(",
"theta",
")",
"*",
"x",
"+",
"np",
".",
"sin",
"(",
"theta",
")",
"*",
"np",
".",
"cross",
"(",
"vec",
",",
"x",
")",
"+",
"(",
"1",
"-",
"np",
".",
"cos",
"(",
"theta",
")",
")",
"*",
"(",
"vec",
"*",
"x",
")",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
"[",
"...",
",",
"None",
"]",
"*",
"vec",
"return",
"out",
".",
"T"
] |
Rotated the input vector or set of vectors `x` around the axis
`vec` by the angle `theta`.
Parameters
----------
x : array_like
The vector or array of vectors to transform. Must have shape
|
[
"Rotated",
"the",
"input",
"vector",
"or",
"set",
"of",
"vectors",
"x",
"around",
"the",
"axis",
"vec",
"by",
"the",
"angle",
"theta",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L10-L29
|
train
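
A one-line sanity check of the Rodrigues rotation helper (illustrative; the function is module-internal, so the import path is taken from the record's file path). Rotating the x unit vector by 90 degrees about z gives the y unit vector.

import numpy as np
from gala.potential.frame.builtin.transformations import rodrigues_axis_angle_rotate

out = rodrigues_axis_angle_rotate([1., 0., 0.], [0., 0., 1.], np.pi / 2)
# out is approximately [0., 1., 0.]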
|
adrn/gala
|
gala/potential/frame/builtin/transformations.py
|
z_angle_rotate
|
def z_angle_rotate(xy, theta):
"""
Rotated the input vector or set of vectors `xy` by the angle `theta`.
Parameters
----------
xy : array_like
The vector or array of vectors to transform. Must have shape
"""
xy = np.array(xy).T
theta = np.array(theta).T
out = np.zeros_like(xy)
out[...,0] = np.cos(theta)*xy[...,0] - np.sin(theta)*xy[...,1]
out[...,1] = np.sin(theta)*xy[...,0] + np.cos(theta)*xy[...,1]
return out.T
|
python
|
def z_angle_rotate(xy, theta):
"""
Rotated the input vector or set of vectors `xy` by the angle `theta`.
Parameters
----------
xy : array_like
The vector or array of vectors to transform. Must have shape
"""
xy = np.array(xy).T
theta = np.array(theta).T
out = np.zeros_like(xy)
out[...,0] = np.cos(theta)*xy[...,0] - np.sin(theta)*xy[...,1]
out[...,1] = np.sin(theta)*xy[...,0] + np.cos(theta)*xy[...,1]
return out.T
|
[
"def",
"z_angle_rotate",
"(",
"xy",
",",
"theta",
")",
":",
"xy",
"=",
"np",
".",
"array",
"(",
"xy",
")",
".",
"T",
"theta",
"=",
"np",
".",
"array",
"(",
"theta",
")",
".",
"T",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"xy",
")",
"out",
"[",
"...",
",",
"0",
"]",
"=",
"np",
".",
"cos",
"(",
"theta",
")",
"*",
"xy",
"[",
"...",
",",
"0",
"]",
"-",
"np",
".",
"sin",
"(",
"theta",
")",
"*",
"xy",
"[",
"...",
",",
"1",
"]",
"out",
"[",
"...",
",",
"1",
"]",
"=",
"np",
".",
"sin",
"(",
"theta",
")",
"*",
"xy",
"[",
"...",
",",
"0",
"]",
"+",
"np",
".",
"cos",
"(",
"theta",
")",
"*",
"xy",
"[",
"...",
",",
"1",
"]",
"return",
"out",
".",
"T"
] |
Rotated the input vector or set of vectors `xy` by the angle `theta`.
Parameters
----------
xy : array_like
The vector or array of vectors to transform. Must have shape
|
[
"Rotated",
"the",
"input",
"vector",
"or",
"set",
"of",
"vectors",
"xy",
"by",
"the",
"angle",
"theta",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L31-L49
|
train
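
The 2D analogue, with the same caveat about the internal import path: rotating (1, 0) by 90 degrees about z gives (0, 1).

import numpy as np
from gala.potential.frame.builtin.transformations import z_angle_rotate

out = z_angle_rotate([1., 0.], np.pi / 2)  # approximately [0., 1.]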
|
adrn/gala
|
gala/potential/frame/builtin/transformations.py
|
static_to_constantrotating
|
def static_to_constantrotating(frame_i, frame_r, w, t=None):
"""
Transform from an inertial static frame to a rotating frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in rotating frame.
vel : `~astropy.units.Quantity`
Velocity in rotating frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=1.)
|
python
|
def static_to_constantrotating(frame_i, frame_r, w, t=None):
"""
Transform from an inertial static frame to a rotating frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in rotating frame.
vel : `~astropy.units.Quantity`
Velocity in rotating frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=1.)
|
[
"def",
"static_to_constantrotating",
"(",
"frame_i",
",",
"frame_r",
",",
"w",
",",
"t",
"=",
"None",
")",
":",
"return",
"_constantrotating_static_helper",
"(",
"frame_r",
"=",
"frame_r",
",",
"frame_i",
"=",
"frame_i",
",",
"w",
"=",
"w",
",",
"t",
"=",
"t",
",",
"sign",
"=",
"1.",
")"
] |
Transform from an inertial static frame to a rotating frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in rotating frame.
vel : `~astropy.units.Quantity`
Velocity in rotating frame.
|
[
"Transform",
"from",
"an",
"inertial",
"static",
"frame",
"to",
"a",
"rotating",
"frame",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L100-L120
|
train
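
A hedged sketch of ``static_to_constantrotating`` (illustrative; the frame constructors, pattern speed, and phase-space position are assumptions). The companion ``constantrotating_to_static`` in the next record applies the inverse transformation.

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic
from gala.potential.frame.builtin.transformations import static_to_constantrotating

frame_i = gp.StaticFrame(units=galactic)
frame_r = gp.ConstantRotatingFrame(Omega=[0., 0., 40.] * u.km / u.s / u.kpc,
                                   units=galactic)
w = gd.PhaseSpacePosition(pos=[8., 0, 0] * u.kpc,
                          vel=[0, 220, 0] * u.km / u.s)
pos_r, vel_r = static_to_constantrotating(frame_i, frame_r, w, t=0. * u.Myr)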
|
adrn/gala
|
gala/potential/frame/builtin/transformations.py
|
constantrotating_to_static
|
def constantrotating_to_static(frame_r, frame_i, w, t=None):
"""
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.)
|
python
|
def constantrotating_to_static(frame_r, frame_i, w, t=None):
"""
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.)
|
[
"def",
"constantrotating_to_static",
"(",
"frame_r",
",",
"frame_i",
",",
"w",
",",
"t",
"=",
"None",
")",
":",
"return",
"_constantrotating_static_helper",
"(",
"frame_r",
"=",
"frame_r",
",",
"frame_i",
"=",
"frame_i",
",",
"w",
"=",
"w",
",",
"t",
"=",
"t",
",",
"sign",
"=",
"-",
"1.",
")"
] |
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
|
[
"Transform",
"from",
"a",
"constantly",
"rotating",
"frame",
"to",
"a",
"static",
"inertial",
"frame",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L122-L142
|
train
|
adrn/gala
|
gala/potential/potential/io.py
|
to_dict
|
def to_dict(potential):
"""
Turn a potential object into a dictionary that fully specifies the
state of the object.
Parameters
----------
potential : :class:`~gala.potential.PotentialBase`
The instantiated :class:`~gala.potential.PotentialBase` object.
"""
from .. import potential as gp
if isinstance(potential, gp.CompositePotential):
d = dict()
d['class'] = potential.__class__.__name__
d['components'] = []
for k, p in potential.items():
comp_dict = _to_dict_help(p)
comp_dict['name'] = k
d['components'].append(comp_dict)
if potential.__class__.__name__ == 'CompositePotential' or \
potential.__class__.__name__ == 'CCompositePotential':
d['type'] = 'composite'
else:
d['type'] = 'custom'
else:
d = _to_dict_help(potential)
return d
|
python
|
def to_dict(potential):
"""
Turn a potential object into a dictionary that fully specifies the
state of the object.
Parameters
----------
potential : :class:`~gala.potential.PotentialBase`
The instantiated :class:`~gala.potential.PotentialBase` object.
"""
from .. import potential as gp
if isinstance(potential, gp.CompositePotential):
d = dict()
d['class'] = potential.__class__.__name__
d['components'] = []
for k, p in potential.items():
comp_dict = _to_dict_help(p)
comp_dict['name'] = k
d['components'].append(comp_dict)
if potential.__class__.__name__ == 'CompositePotential' or \
potential.__class__.__name__ == 'CCompositePotential':
d['type'] = 'composite'
else:
d['type'] = 'custom'
else:
d = _to_dict_help(potential)
return d
|
[
"def",
"to_dict",
"(",
"potential",
")",
":",
"from",
".",
".",
"import",
"potential",
"as",
"gp",
"if",
"isinstance",
"(",
"potential",
",",
"gp",
".",
"CompositePotential",
")",
":",
"d",
"=",
"dict",
"(",
")",
"d",
"[",
"'class'",
"]",
"=",
"potential",
".",
"__class__",
".",
"__name__",
"d",
"[",
"'components'",
"]",
"=",
"[",
"]",
"for",
"k",
",",
"p",
"in",
"potential",
".",
"items",
"(",
")",
":",
"comp_dict",
"=",
"_to_dict_help",
"(",
"p",
")",
"comp_dict",
"[",
"'name'",
"]",
"=",
"k",
"d",
"[",
"'components'",
"]",
".",
"append",
"(",
"comp_dict",
")",
"if",
"potential",
".",
"__class__",
".",
"__name__",
"==",
"'CompositePotential'",
"or",
"potential",
".",
"__class__",
".",
"__name__",
"==",
"'CCompositePotential'",
":",
"d",
"[",
"'type'",
"]",
"=",
"'composite'",
"else",
":",
"d",
"[",
"'type'",
"]",
"=",
"'custom'",
"else",
":",
"d",
"=",
"_to_dict_help",
"(",
"potential",
")",
"return",
"d"
] |
Turn a potential object into a dictionary that fully specifies the
state of the object.
Parameters
----------
potential : :class:`~gala.potential.PotentialBase`
The instantiated :class:`~gala.potential.PotentialBase` object.
|
[
"Turn",
"a",
"potential",
"object",
"into",
"a",
"dictionary",
"that",
"fully",
"specifies",
"the",
"state",
"of",
"the",
"object",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/potential/io.py#L148-L179
|
train
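
An illustrative sketch of ``to_dict`` on a composite potential (the component potentials and parameter values are assumptions; the import path follows the file path in the record).

import astropy.units as u
import gala.potential as gp
from gala.units import galactic
from gala.potential.potential.io import to_dict  # path from the record

pot = gp.CompositePotential(
    disk=gp.MiyamotoNagaiPotential(m=6e10 * u.Msun, a=3. * u.kpc,
                                   b=0.28 * u.kpc, units=galactic),
    halo=gp.NFWPotential(m=6e11 * u.Msun, r_s=16. * u.kpc, units=galactic))

d = to_dict(pot)
# d['type'] == 'composite'; d['components'] has one dict per named component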
|
adrn/gala
|
gala/integrate/core.py
|
Integrator._prepare_ws
|
def _prepare_ws(self, w0, mmap, n_steps):
"""
Decide how to make the return array. If mmap is False, this returns a
full array of zeros, but with the correct shape as the output. If mmap
is True, return a pointer to a memory-mapped array. The latter is
particularly useful for integrating a large number of orbits or
integrating a large number of time steps.
"""
from ..dynamics import PhaseSpacePosition
if not isinstance(w0, PhaseSpacePosition):
w0 = PhaseSpacePosition.from_w(w0)
arr_w0 = w0.w(self._func_units)
self.ndim, self.norbits = arr_w0.shape
self.ndim = self.ndim//2
return_shape = (2*self.ndim, n_steps+1, self.norbits)
if mmap is None:
# create the return arrays
ws = np.zeros(return_shape, dtype=float)
else:
if mmap.shape != return_shape:
raise ValueError("Shape of memory-mapped array doesn't match "
"expected shape of return array ({} vs {})"
.format(mmap.shape, return_shape))
if not mmap.flags.writeable:
raise TypeError("Memory-mapped array must be a writable mode, "
" not '{}'".format(mmap.mode))
ws = mmap
return w0, arr_w0, ws
|
python
|
def _prepare_ws(self, w0, mmap, n_steps):
"""
Decide how to make the return array. If mmap is False, this returns a
full array of zeros, but with the correct shape as the output. If mmap
is True, return a pointer to a memory-mapped array. The latter is
particularly useful for integrating a large number of orbits or
integrating a large number of time steps.
"""
from ..dynamics import PhaseSpacePosition
if not isinstance(w0, PhaseSpacePosition):
w0 = PhaseSpacePosition.from_w(w0)
arr_w0 = w0.w(self._func_units)
self.ndim, self.norbits = arr_w0.shape
self.ndim = self.ndim//2
return_shape = (2*self.ndim, n_steps+1, self.norbits)
if mmap is None:
# create the return arrays
ws = np.zeros(return_shape, dtype=float)
else:
if mmap.shape != return_shape:
raise ValueError("Shape of memory-mapped array doesn't match "
"expected shape of return array ({} vs {})"
.format(mmap.shape, return_shape))
if not mmap.flags.writeable:
raise TypeError("Memory-mapped array must be a writable mode, "
" not '{}'".format(mmap.mode))
ws = mmap
return w0, arr_w0, ws
|
[
"def",
"_prepare_ws",
"(",
"self",
",",
"w0",
",",
"mmap",
",",
"n_steps",
")",
":",
"from",
".",
".",
"dynamics",
"import",
"PhaseSpacePosition",
"if",
"not",
"isinstance",
"(",
"w0",
",",
"PhaseSpacePosition",
")",
":",
"w0",
"=",
"PhaseSpacePosition",
".",
"from_w",
"(",
"w0",
")",
"arr_w0",
"=",
"w0",
".",
"w",
"(",
"self",
".",
"_func_units",
")",
"self",
".",
"ndim",
",",
"self",
".",
"norbits",
"=",
"arr_w0",
".",
"shape",
"self",
".",
"ndim",
"=",
"self",
".",
"ndim",
"//",
"2",
"return_shape",
"=",
"(",
"2",
"*",
"self",
".",
"ndim",
",",
"n_steps",
"+",
"1",
",",
"self",
".",
"norbits",
")",
"if",
"mmap",
"is",
"None",
":",
"# create the return arrays",
"ws",
"=",
"np",
".",
"zeros",
"(",
"return_shape",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"if",
"mmap",
".",
"shape",
"!=",
"return_shape",
":",
"raise",
"ValueError",
"(",
"\"Shape of memory-mapped array doesn't match \"",
"\"expected shape of return array ({} vs {})\"",
".",
"format",
"(",
"mmap",
".",
"shape",
",",
"return_shape",
")",
")",
"if",
"not",
"mmap",
".",
"flags",
".",
"writeable",
":",
"raise",
"TypeError",
"(",
"\"Memory-mapped array must be a writable mode, \"",
"\" not '{}'\"",
".",
"format",
"(",
"mmap",
".",
"mode",
")",
")",
"ws",
"=",
"mmap",
"return",
"w0",
",",
"arr_w0",
",",
"ws"
] |
Decide how to make the return array. If mmap is False, this returns a
full array of zeros, but with the correct shape as the output. If mmap
is True, return a pointer to a memory-mapped array. The latter is
particularly useful for integrating a large number of orbits or
integrating a large number of time steps.
|
[
"Decide",
"how",
"to",
"make",
"the",
"return",
"array",
".",
"If",
"mmap",
"is",
"False",
"this",
"returns",
"a",
"full",
"array",
"of",
"zeros",
"but",
"with",
"the",
"correct",
"shape",
"as",
"the",
"output",
".",
"If",
"mmap",
"is",
"True",
"return",
"a",
"pointer",
"to",
"a",
"memory",
"-",
"mapped",
"array",
".",
"The",
"latter",
"is",
"particularly",
"useful",
"for",
"integrating",
"a",
"large",
"number",
"of",
"orbits",
"or",
"integrating",
"a",
"large",
"number",
"of",
"time",
"steps",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/integrate/core.py#L44-L78
|
train
|
adrn/gala
|
gala/dynamics/nonlinear.py
|
fast_lyapunov_max
|
def fast_lyapunov_max(w0, hamiltonian, dt, n_steps, d0=1e-5,
n_steps_per_pullback=10, noffset_orbits=2, t1=0.,
atol=1E-10, rtol=1E-10, nmax=0, return_orbit=True):
"""
Compute the maximum Lyapunov exponent using a C-implemented estimator
that uses the DOPRI853 integrator.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
hamiltonian : `~gala.potential.Hamiltonian`
dt : numeric
Timestep.
n_steps : int
Number of steps to run for.
d0 : numeric (optional)
The initial separation.
n_steps_per_pullback : int (optional)
Number of steps to run before re-normalizing the offset vectors.
noffset_orbits : int (optional)
Number of offset orbits to run.
t1 : numeric (optional)
Time of initial conditions. Assumed to be t=0.
return_orbit : bool (optional)
Store the full orbit for the parent and all offset orbits.
Returns
-------
LEs : :class:`~astropy.units.Quantity`
Lyapunov exponents calculated from each offset / deviation orbit.
orbit : `~gala.dynamics.Orbit` (optional)
"""
from .lyapunov import dop853_lyapunov_max, dop853_lyapunov_max_dont_save
# TODO: remove in v1.0
if isinstance(hamiltonian, PotentialBase):
from ..potential import Hamiltonian
hamiltonian = Hamiltonian(hamiltonian)
if not hamiltonian.c_enabled:
raise TypeError("Input Hamiltonian must contain a C-implemented "
"potential and frame.")
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
ndim = w0.shape[0]//2
w0 = PhaseSpacePosition(pos=w0[:ndim],
vel=w0[ndim:])
_w0 = np.squeeze(w0.w(hamiltonian.units))
if _w0.ndim > 1:
raise ValueError("Can only compute fast Lyapunov exponent for a single orbit.")
if return_orbit:
t,w,l = dop853_lyapunov_max(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
w = np.rollaxis(w, -1)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
orbit = Orbit.from_w(w=w, units=hamiltonian.units,
t=t*tunit, hamiltonian=hamiltonian)
return l/tunit, orbit
else:
l = dop853_lyapunov_max_dont_save(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
return l/tunit
|
python
|
def fast_lyapunov_max(w0, hamiltonian, dt, n_steps, d0=1e-5,
n_steps_per_pullback=10, noffset_orbits=2, t1=0.,
atol=1E-10, rtol=1E-10, nmax=0, return_orbit=True):
"""
Compute the maximum Lyapunov exponent using a C-implemented estimator
that uses the DOPRI853 integrator.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
hamiltonian : `~gala.potential.Hamiltonian`
dt : numeric
Timestep.
n_steps : int
Number of steps to run for.
d0 : numeric (optional)
The initial separation.
n_steps_per_pullback : int (optional)
Number of steps to run before re-normalizing the offset vectors.
noffset_orbits : int (optional)
Number of offset orbits to run.
t1 : numeric (optional)
Time of initial conditions. Assumed to be t=0.
return_orbit : bool (optional)
Store the full orbit for the parent and all offset orbits.
Returns
-------
LEs : :class:`~astropy.units.Quantity`
Lyapunov exponents calculated from each offset / deviation orbit.
orbit : `~gala.dynamics.Orbit` (optional)
"""
from .lyapunov import dop853_lyapunov_max, dop853_lyapunov_max_dont_save
# TODO: remove in v1.0
if isinstance(hamiltonian, PotentialBase):
from ..potential import Hamiltonian
hamiltonian = Hamiltonian(hamiltonian)
if not hamiltonian.c_enabled:
raise TypeError("Input Hamiltonian must contain a C-implemented "
"potential and frame.")
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
ndim = w0.shape[0]//2
w0 = PhaseSpacePosition(pos=w0[:ndim],
vel=w0[ndim:])
_w0 = np.squeeze(w0.w(hamiltonian.units))
if _w0.ndim > 1:
raise ValueError("Can only compute fast Lyapunov exponent for a single orbit.")
if return_orbit:
t,w,l = dop853_lyapunov_max(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
w = np.rollaxis(w, -1)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
orbit = Orbit.from_w(w=w, units=hamiltonian.units,
t=t*tunit, hamiltonian=hamiltonian)
return l/tunit, orbit
else:
l = dop853_lyapunov_max_dont_save(hamiltonian, _w0,
dt, n_steps+1, t1,
d0, n_steps_per_pullback, noffset_orbits,
atol, rtol, nmax)
try:
tunit = hamiltonian.units['time']
except (TypeError, AttributeError):
tunit = u.dimensionless_unscaled
return l/tunit
|
[
"def",
"fast_lyapunov_max",
"(",
"w0",
",",
"hamiltonian",
",",
"dt",
",",
"n_steps",
",",
"d0",
"=",
"1e-5",
",",
"n_steps_per_pullback",
"=",
"10",
",",
"noffset_orbits",
"=",
"2",
",",
"t1",
"=",
"0.",
",",
"atol",
"=",
"1E-10",
",",
"rtol",
"=",
"1E-10",
",",
"nmax",
"=",
"0",
",",
"return_orbit",
"=",
"True",
")",
":",
"from",
".",
"lyapunov",
"import",
"dop853_lyapunov_max",
",",
"dop853_lyapunov_max_dont_save",
"# TODO: remove in v1.0",
"if",
"isinstance",
"(",
"hamiltonian",
",",
"PotentialBase",
")",
":",
"from",
".",
".",
"potential",
"import",
"Hamiltonian",
"hamiltonian",
"=",
"Hamiltonian",
"(",
"hamiltonian",
")",
"if",
"not",
"hamiltonian",
".",
"c_enabled",
":",
"raise",
"TypeError",
"(",
"\"Input Hamiltonian must contain a C-implemented \"",
"\"potential and frame.\"",
")",
"if",
"not",
"isinstance",
"(",
"w0",
",",
"PhaseSpacePosition",
")",
":",
"w0",
"=",
"np",
".",
"asarray",
"(",
"w0",
")",
"ndim",
"=",
"w0",
".",
"shape",
"[",
"0",
"]",
"//",
"2",
"w0",
"=",
"PhaseSpacePosition",
"(",
"pos",
"=",
"w0",
"[",
":",
"ndim",
"]",
",",
"vel",
"=",
"w0",
"[",
"ndim",
":",
"]",
")",
"_w0",
"=",
"np",
".",
"squeeze",
"(",
"w0",
".",
"w",
"(",
"hamiltonian",
".",
"units",
")",
")",
"if",
"_w0",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Can only compute fast Lyapunov exponent for a single orbit.\"",
")",
"if",
"return_orbit",
":",
"t",
",",
"w",
",",
"l",
"=",
"dop853_lyapunov_max",
"(",
"hamiltonian",
",",
"_w0",
",",
"dt",
",",
"n_steps",
"+",
"1",
",",
"t1",
",",
"d0",
",",
"n_steps_per_pullback",
",",
"noffset_orbits",
",",
"atol",
",",
"rtol",
",",
"nmax",
")",
"w",
"=",
"np",
".",
"rollaxis",
"(",
"w",
",",
"-",
"1",
")",
"try",
":",
"tunit",
"=",
"hamiltonian",
".",
"units",
"[",
"'time'",
"]",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
":",
"tunit",
"=",
"u",
".",
"dimensionless_unscaled",
"orbit",
"=",
"Orbit",
".",
"from_w",
"(",
"w",
"=",
"w",
",",
"units",
"=",
"hamiltonian",
".",
"units",
",",
"t",
"=",
"t",
"*",
"tunit",
",",
"hamiltonian",
"=",
"hamiltonian",
")",
"return",
"l",
"/",
"tunit",
",",
"orbit",
"else",
":",
"l",
"=",
"dop853_lyapunov_max_dont_save",
"(",
"hamiltonian",
",",
"_w0",
",",
"dt",
",",
"n_steps",
"+",
"1",
",",
"t1",
",",
"d0",
",",
"n_steps_per_pullback",
",",
"noffset_orbits",
",",
"atol",
",",
"rtol",
",",
"nmax",
")",
"try",
":",
"tunit",
"=",
"hamiltonian",
".",
"units",
"[",
"'time'",
"]",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
":",
"tunit",
"=",
"u",
".",
"dimensionless_unscaled",
"return",
"l",
"/",
"tunit"
] |
Compute the maximum Lyapunov exponent using a C-implemented estimator
that uses the DOPRI853 integrator.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
hamiltonian : `~gala.potential.Hamiltonian`
dt : numeric
Timestep.
n_steps : int
Number of steps to run for.
d0 : numeric (optional)
The initial separation.
n_steps_per_pullback : int (optional)
Number of steps to run before re-normalizing the offset vectors.
noffset_orbits : int (optional)
Number of offset orbits to run.
t1 : numeric (optional)
Time of initial conditions. Assumed to be t=0.
return_orbit : bool (optional)
Store the full orbit for the parent and all offset orbits.
Returns
-------
LEs : :class:`~astropy.units.Quantity`
Lyapunov exponents calculated from each offset / deviation orbit.
orbit : `~gala.dynamics.Orbit` (optional)
|
[
"Compute",
"the",
"maximum",
"Lyapunov",
"exponent",
"using",
"a",
"C",
"-",
"implemented",
"estimator",
"that",
"uses",
"the",
"DOPRI853",
"integrator",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/nonlinear.py#L12-L95
|
train
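
A hedged call sketch for ``fast_lyapunov_max`` (illustrative values throughout; per the record the potential must be C-implemented, which the built-in Hernquist potential is).

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic

pot = gp.HernquistPotential(m=1e11 * u.Msun, c=1. * u.kpc, units=galactic)
ham = gp.Hamiltonian(pot)
w0 = gd.PhaseSpacePosition(pos=[10., 0, 0.5] * u.kpc,
                           vel=[0, 170, 10] * u.km / u.s)

# dt is numeric, in the potential's time unit (Myr for galactic units)
lyap, orbit = gd.fast_lyapunov_max(w0, ham, dt=1., n_steps=20000)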
|
adrn/gala
|
gala/dynamics/nonlinear.py
|
surface_of_section
|
def surface_of_section(orbit, plane_ix, interpolate=False):
"""
Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc...
"""
w = orbit.w()
if w.ndim == 2:
w = w[...,None]
ndim,ntimes,norbits = w.shape
H_dim = ndim // 2
p_ix = plane_ix + H_dim
if interpolate:
raise NotImplementedError("Not yet implemented, sorry!")
# record position on specified plane when orbit crosses
all_sos = np.zeros((ndim,norbits), dtype=object)
for n in range(norbits):
cross_ix = argrelmin(w[plane_ix,:,n]**2)[0]
cross_ix = cross_ix[w[p_ix,cross_ix,n] > 0.]
sos = w[:,cross_ix,n]
for j in range(ndim):
all_sos[j,n] = sos[j,:]
return all_sos
|
python
|
def surface_of_section(orbit, plane_ix, interpolate=False):
"""
Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc...
"""
w = orbit.w()
if w.ndim == 2:
w = w[...,None]
ndim,ntimes,norbits = w.shape
H_dim = ndim // 2
p_ix = plane_ix + H_dim
if interpolate:
raise NotImplementedError("Not yet implemented, sorry!")
# record position on specified plane when orbit crosses
all_sos = np.zeros((ndim,norbits), dtype=object)
for n in range(norbits):
cross_ix = argrelmin(w[plane_ix,:,n]**2)[0]
cross_ix = cross_ix[w[p_ix,cross_ix,n] > 0.]
sos = w[:,cross_ix,n]
for j in range(ndim):
all_sos[j,n] = sos[j,:]
return all_sos
|
[
"def",
"surface_of_section",
"(",
"orbit",
",",
"plane_ix",
",",
"interpolate",
"=",
"False",
")",
":",
"w",
"=",
"orbit",
".",
"w",
"(",
")",
"if",
"w",
".",
"ndim",
"==",
"2",
":",
"w",
"=",
"w",
"[",
"...",
",",
"None",
"]",
"ndim",
",",
"ntimes",
",",
"norbits",
"=",
"w",
".",
"shape",
"H_dim",
"=",
"ndim",
"//",
"2",
"p_ix",
"=",
"plane_ix",
"+",
"H_dim",
"if",
"interpolate",
":",
"raise",
"NotImplementedError",
"(",
"\"Not yet implemented, sorry!\"",
")",
"# record position on specified plane when orbit crosses",
"all_sos",
"=",
"np",
".",
"zeros",
"(",
"(",
"ndim",
",",
"norbits",
")",
",",
"dtype",
"=",
"object",
")",
"for",
"n",
"in",
"range",
"(",
"norbits",
")",
":",
"cross_ix",
"=",
"argrelmin",
"(",
"w",
"[",
"plane_ix",
",",
":",
",",
"n",
"]",
"**",
"2",
")",
"[",
"0",
"]",
"cross_ix",
"=",
"cross_ix",
"[",
"w",
"[",
"p_ix",
",",
"cross_ix",
",",
"n",
"]",
">",
"0.",
"]",
"sos",
"=",
"w",
"[",
":",
",",
"cross_ix",
",",
"n",
"]",
"for",
"j",
"in",
"range",
"(",
"ndim",
")",
":",
"all_sos",
"[",
"j",
",",
"n",
"]",
"=",
"sos",
"[",
"j",
",",
":",
"]",
"return",
"all_sos"
] |
Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc...
|
[
"Generate",
"and",
"return",
"a",
"surface",
"of",
"section",
"from",
"the",
"given",
"orbit",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/nonlinear.py#L207-L263
|
train
|
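A short usage sketch for surface_of_section, assuming the import path gala.dynamics.nonlinear (this record's module) and the usual gala orbit-integration API; the potential and initial conditions are illustrative only.

import astropy.units as u
import gala.potential as gp
import gala.dynamics as gd
from gala.units import galactic
from gala.dynamics.nonlinear import surface_of_section

pot = gp.HernquistPotential(m=1e11 * u.Msun, c=1.0 * u.kpc, units=galactic)
w0 = gd.PhaseSpacePosition(pos=[8.0, 0.0, 0.0] * u.kpc,
                           vel=[0.0, 150.0, 30.0] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=0.5 * u.Myr, n_steps=20000)

# Record crossings of the x = 0 plane with p_x > 0 (plane_ix=0).
sos = surface_of_section(orbit, plane_ix=0)
print(sos.shape)   # (ndim, norbits) object array of crossing coordinates
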
adrn/gala
|
gala/potential/potential/core.py
|
PotentialBase._remove_units
|
def _remove_units(self, x):
"""
Always returns an array. If a Quantity is passed in, it converts to the
units associated with this object and returns the value.
"""
if hasattr(x, 'unit'):
x = x.decompose(self.units).value
else:
x = np.array(x)
return x
|
python
|
def _remove_units(self, x):
"""
Always returns an array. If a Quantity is passed in, it converts to the
units associated with this object and returns the value.
"""
if hasattr(x, 'unit'):
x = x.decompose(self.units).value
else:
x = np.array(x)
return x
|
[
"def",
"_remove_units",
"(",
"self",
",",
"x",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'unit'",
")",
":",
"x",
"=",
"x",
".",
"decompose",
"(",
"self",
".",
"units",
")",
".",
"value",
"else",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"return",
"x"
] |
Always returns an array. If a Quantity is passed in, it converts to the
units associated with this object and returns the value.
|
[
"Always",
"returns",
"an",
"array",
".",
"If",
"a",
"Quantity",
"is",
"passed",
"in",
"it",
"converts",
"to",
"the",
"units",
"associated",
"with",
"this",
"object",
"and",
"returns",
"the",
"value",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/potential/core.py#L96-L107
|
train
|
adrn/gala
|
gala/potential/potential/core.py
|
PotentialBase.mass_enclosed
|
def mass_enclosed(self, q, t=0.):
"""
Estimate the mass enclosed within the given position by assuming the potential
is spherical.
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
        Position(s) to estimate the enclosed mass.
Returns
-------
menc : `~astropy.units.Quantity`
Mass enclosed at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
# small step-size in direction of q
h = 1E-3 # MAGIC NUMBER
# Radius
r = np.sqrt(np.sum(q**2, axis=1))
epsilon = h*q/r[:, np.newaxis]
dPhi_dr_plus = self._energy(q + epsilon, t=t)
dPhi_dr_minus = self._energy(q - epsilon, t=t)
diff = (dPhi_dr_plus - dPhi_dr_minus)
if isinstance(self.units, DimensionlessUnitSystem):
Gee = 1.
else:
Gee = G.decompose(self.units).value
Menc = np.abs(r*r * diff / Gee / (2.*h))
Menc = Menc.reshape(orig_shape[1:])
sgn = 1.
if 'm' in self.parameters and self.parameters['m'] < 0:
sgn = -1.
return sgn * Menc * self.units['mass']
|
python
|
def mass_enclosed(self, q, t=0.):
"""
Estimate the mass enclosed within the given position by assuming the potential
is spherical.
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
        Position(s) to estimate the enclosed mass.
Returns
-------
menc : `~astropy.units.Quantity`
Mass enclosed at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
orig_shape, q = self._get_c_valid_arr(q)
t = self._validate_prepare_time(t, q)
# small step-size in direction of q
h = 1E-3 # MAGIC NUMBER
# Radius
r = np.sqrt(np.sum(q**2, axis=1))
epsilon = h*q/r[:, np.newaxis]
dPhi_dr_plus = self._energy(q + epsilon, t=t)
dPhi_dr_minus = self._energy(q - epsilon, t=t)
diff = (dPhi_dr_plus - dPhi_dr_minus)
if isinstance(self.units, DimensionlessUnitSystem):
Gee = 1.
else:
Gee = G.decompose(self.units).value
Menc = np.abs(r*r * diff / Gee / (2.*h))
Menc = Menc.reshape(orig_shape[1:])
sgn = 1.
if 'm' in self.parameters and self.parameters['m'] < 0:
sgn = -1.
return sgn * Menc * self.units['mass']
|
[
"def",
"mass_enclosed",
"(",
"self",
",",
"q",
",",
"t",
"=",
"0.",
")",
":",
"q",
"=",
"self",
".",
"_remove_units_prepare_shape",
"(",
"q",
")",
"orig_shape",
",",
"q",
"=",
"self",
".",
"_get_c_valid_arr",
"(",
"q",
")",
"t",
"=",
"self",
".",
"_validate_prepare_time",
"(",
"t",
",",
"q",
")",
"# small step-size in direction of q",
"h",
"=",
"1E-3",
"# MAGIC NUMBER",
"# Radius",
"r",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"q",
"**",
"2",
",",
"axis",
"=",
"1",
")",
")",
"epsilon",
"=",
"h",
"*",
"q",
"/",
"r",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"dPhi_dr_plus",
"=",
"self",
".",
"_energy",
"(",
"q",
"+",
"epsilon",
",",
"t",
"=",
"t",
")",
"dPhi_dr_minus",
"=",
"self",
".",
"_energy",
"(",
"q",
"-",
"epsilon",
",",
"t",
"=",
"t",
")",
"diff",
"=",
"(",
"dPhi_dr_plus",
"-",
"dPhi_dr_minus",
")",
"if",
"isinstance",
"(",
"self",
".",
"units",
",",
"DimensionlessUnitSystem",
")",
":",
"Gee",
"=",
"1.",
"else",
":",
"Gee",
"=",
"G",
".",
"decompose",
"(",
"self",
".",
"units",
")",
".",
"value",
"Menc",
"=",
"np",
".",
"abs",
"(",
"r",
"*",
"r",
"*",
"diff",
"/",
"Gee",
"/",
"(",
"2.",
"*",
"h",
")",
")",
"Menc",
"=",
"Menc",
".",
"reshape",
"(",
"orig_shape",
"[",
"1",
":",
"]",
")",
"sgn",
"=",
"1.",
"if",
"'m'",
"in",
"self",
".",
"parameters",
"and",
"self",
".",
"parameters",
"[",
"'m'",
"]",
"<",
"0",
":",
"sgn",
"=",
"-",
"1.",
"return",
"sgn",
"*",
"Menc",
"*",
"self",
".",
"units",
"[",
"'mass'",
"]"
] |
Estimate the mass enclosed within the given position by assuming the potential
is spherical.
Parameters
----------
q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
        Position(s) to estimate the enclosed mass.
Returns
-------
menc : `~astropy.units.Quantity`
Mass enclosed at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
|
[
"Estimate",
"the",
"mass",
"enclosed",
"within",
"the",
"given",
"position",
"by",
"assuming",
"the",
"potential",
"is",
"spherical",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/potential/core.py#L246-L291
|
train
|
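A quick sketch of mass_enclosed, using a Hernquist potential for which the enclosed mass is known analytically, M(<r) = M r^2 / (r + c)^2, so the finite-difference estimate can be checked; gala API names outside this record are assumed.

import astropy.units as u
import gala.potential as gp
from gala.units import galactic

pot = gp.HernquistPotential(m=1e11 * u.Msun, c=1.0 * u.kpc, units=galactic)
q = [10.0, 0.0, 0.0] * u.kpc

# Compare the numerical estimate with the analytic Hernquist profile at r=10, c=1.
m_est = pot.mass_enclosed(q)
m_true = 1e11 * u.Msun * (10.0 / 11.0) ** 2
print(m_est.to(u.Msun), m_true)
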
adrn/gala
|
gala/potential/potential/core.py
|
PotentialBase.circular_velocity
|
def circular_velocity(self, q, t=0.):
"""
Estimate the circular velocity at the given position assuming the
potential is spherical.
Parameters
----------
q : array_like, numeric
Position(s) to estimate the circular velocity.
Returns
-------
vcirc : `~astropy.units.Quantity`
Circular velocity at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
# Radius
r = np.sqrt(np.sum(q**2, axis=0)) * self.units['length']
dPhi_dxyz = self.gradient(q, t=t)
dPhi_dr = np.sum(dPhi_dxyz * q/r.value, axis=0)
return self.units.decompose(np.sqrt(r * np.abs(dPhi_dr)))
|
python
|
def circular_velocity(self, q, t=0.):
"""
Estimate the circular velocity at the given position assuming the
potential is spherical.
Parameters
----------
q : array_like, numeric
Position(s) to estimate the circular velocity.
Returns
-------
vcirc : `~astropy.units.Quantity`
Circular velocity at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
"""
q = self._remove_units_prepare_shape(q)
# Radius
r = np.sqrt(np.sum(q**2, axis=0)) * self.units['length']
dPhi_dxyz = self.gradient(q, t=t)
dPhi_dr = np.sum(dPhi_dxyz * q/r.value, axis=0)
return self.units.decompose(np.sqrt(r * np.abs(dPhi_dr)))
|
[
"def",
"circular_velocity",
"(",
"self",
",",
"q",
",",
"t",
"=",
"0.",
")",
":",
"q",
"=",
"self",
".",
"_remove_units_prepare_shape",
"(",
"q",
")",
"# Radius",
"r",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"q",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
"*",
"self",
".",
"units",
"[",
"'length'",
"]",
"dPhi_dxyz",
"=",
"self",
".",
"gradient",
"(",
"q",
",",
"t",
"=",
"t",
")",
"dPhi_dr",
"=",
"np",
".",
"sum",
"(",
"dPhi_dxyz",
"*",
"q",
"/",
"r",
".",
"value",
",",
"axis",
"=",
"0",
")",
"return",
"self",
".",
"units",
".",
"decompose",
"(",
"np",
".",
"sqrt",
"(",
"r",
"*",
"np",
".",
"abs",
"(",
"dPhi_dr",
")",
")",
")"
] |
Estimate the circular velocity at the given position assuming the
potential is spherical.
Parameters
----------
q : array_like, numeric
Position(s) to estimate the circular velocity.
Returns
-------
vcirc : `~astropy.units.Quantity`
Circular velocity at the given position(s). If the input position
has shape ``q.shape``, the output energy will have shape
``q.shape[1:]``.
|
[
"Estimate",
"the",
"circular",
"velocity",
"at",
"the",
"given",
"position",
"assuming",
"the",
"potential",
"is",
"spherical",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/potential/core.py#L293-L318
|
train
|
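A companion sketch for circular_velocity with the same Hernquist potential; for that profile v_c^2(r) = G M r / (r + c)^2, which gives a direct cross-check (gala API names outside this record are assumed).

import numpy as np
import astropy.units as u
import astropy.constants as const
import gala.potential as gp
from gala.units import galactic

pot = gp.HernquistPotential(m=1e11 * u.Msun, c=1.0 * u.kpc, units=galactic)
q = [10.0, 0.0, 0.0] * u.kpc

v_est = pot.circular_velocity(q).to(u.km / u.s)
v_true = np.sqrt(const.G * 1e11 * u.Msun * 10 * u.kpc / (11 * u.kpc) ** 2).to(u.km / u.s)
print(v_est, v_true)   # the two should agree closely
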
adrn/gala
|
gala/potential/potential/util.py
|
format_doc
|
def format_doc(*args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
Modeled after astropy.utils.decorators.format_doc
"""
def set_docstring(obj):
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
python
|
def format_doc(*args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
Modeled after astropy.utils.decorators.format_doc
"""
def set_docstring(obj):
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
[
"def",
"format_doc",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"set_docstring",
"(",
"obj",
")",
":",
"# None means: use the objects __doc__",
"doc",
"=",
"obj",
".",
"__doc__",
"# Delete documentation in this case so we don't end up with",
"# awkwardly self-inserted docs.",
"obj",
".",
"__doc__",
"=",
"None",
"# If the original has a not-empty docstring append it to the format",
"# kwargs.",
"kwargs",
"[",
"'__doc__'",
"]",
"=",
"obj",
".",
"__doc__",
"or",
"''",
"obj",
".",
"__doc__",
"=",
"doc",
".",
"format",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"obj",
"return",
"set_docstring"
] |
Replaces the docstring of the decorated object and then formats it.
Modeled after astropy.utils.decorators.format_doc
|
[
"Replaces",
"the",
"docstring",
"of",
"the",
"decorated",
"object",
"and",
"then",
"formats",
"it",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/potential/util.py#L165-L184
|
train
|
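A self-contained sketch of how a decorator like format_doc is applied; the import path gala.potential.potential.util follows this record's path and is an assumption (it is an internal utility module), and the decorated function is purely illustrative.

from gala.potential.potential.util import format_doc

common_params = """x : array_like
        Input positions."""

@format_doc(params=common_params)
def energy(x):
    """Compute a toy energy.

    Parameters
    ----------
    {params}
    """
    return sum(xi ** 2 for xi in x)

print(energy.__doc__)   # the {params} placeholder is filled in
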
adrn/gala
|
gala/io.py
|
quantity_to_hdf5
|
def quantity_to_hdf5(f, key, q):
"""
Turn an Astropy Quantity object into something we can write out to
an HDF5 file.
Parameters
----------
f : :class:`h5py.File`, :class:`h5py.Group`, :class:`h5py.DataSet`
key : str
The name.
q : float, `astropy.units.Quantity`
The quantity.
"""
if hasattr(q, 'unit'):
f[key] = q.value
f[key].attrs['unit'] = str(q.unit)
else:
f[key] = q
f[key].attrs['unit'] = ""
|
python
|
def quantity_to_hdf5(f, key, q):
"""
Turn an Astropy Quantity object into something we can write out to
an HDF5 file.
Parameters
----------
f : :class:`h5py.File`, :class:`h5py.Group`, :class:`h5py.DataSet`
key : str
The name.
q : float, `astropy.units.Quantity`
The quantity.
"""
if hasattr(q, 'unit'):
f[key] = q.value
f[key].attrs['unit'] = str(q.unit)
else:
f[key] = q
f[key].attrs['unit'] = ""
|
[
"def",
"quantity_to_hdf5",
"(",
"f",
",",
"key",
",",
"q",
")",
":",
"if",
"hasattr",
"(",
"q",
",",
"'unit'",
")",
":",
"f",
"[",
"key",
"]",
"=",
"q",
".",
"value",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"'unit'",
"]",
"=",
"str",
"(",
"q",
".",
"unit",
")",
"else",
":",
"f",
"[",
"key",
"]",
"=",
"q",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"'unit'",
"]",
"=",
"\"\""
] |
Turn an Astropy Quantity object into something we can write out to
an HDF5 file.
Parameters
----------
f : :class:`h5py.File`, :class:`h5py.Group`, :class:`h5py.DataSet`
key : str
The name.
q : float, `astropy.units.Quantity`
The quantity.
|
[
"Turn",
"an",
"Astropy",
"Quantity",
"object",
"into",
"something",
"we",
"can",
"write",
"out",
"to",
"an",
"HDF5",
"file",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/io.py#L27-L48
|
train
|
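A round-trip sketch for quantity_to_hdf5, assuming h5py is available and that the function is importable from gala.io (this record's module path).

import os
import tempfile

import astropy.units as u
import h5py
from gala.io import quantity_to_hdf5

path = os.path.join(tempfile.mkdtemp(), "demo.h5")
with h5py.File(path, "w") as f:
    quantity_to_hdf5(f, "radius", 8.3 * u.kpc)

with h5py.File(path, "r") as f:
    value = f["radius"][()]              # scalar dataset
    unit = u.Unit(f["radius"].attrs["unit"])
print(value * unit)                      # 8.3 kpc
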
adrn/gala
|
gala/units.py
|
UnitSystem.get_constant
|
def get_constant(self, name):
"""
Retrieve a constant with specified name in this unit system.
Parameters
----------
name : str
The name of the constant, e.g., G.
Returns
-------
const : float
The value of the constant represented in this unit system.
Examples
--------
>>> usys = UnitSystem(u.kpc, u.Myr, u.radian, u.Msun)
>>> usys.get_constant('c')
306.6013937879527
"""
try:
c = getattr(const, name)
except AttributeError:
raise ValueError("Constant name '{}' doesn't exist in astropy.constants".format(name))
return c.decompose(self._core_units).value
|
python
|
def get_constant(self, name):
"""
Retrieve a constant with specified name in this unit system.
Parameters
----------
name : str
The name of the constant, e.g., G.
Returns
-------
const : float
The value of the constant represented in this unit system.
Examples
--------
>>> usys = UnitSystem(u.kpc, u.Myr, u.radian, u.Msun)
>>> usys.get_constant('c')
306.6013937879527
"""
try:
c = getattr(const, name)
except AttributeError:
raise ValueError("Constant name '{}' doesn't exist in astropy.constants".format(name))
return c.decompose(self._core_units).value
|
[
"def",
"get_constant",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"c",
"=",
"getattr",
"(",
"const",
",",
"name",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"Constant name '{}' doesn't exist in astropy.constants\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"c",
".",
"decompose",
"(",
"self",
".",
"_core_units",
")",
".",
"value"
] |
Retrieve a constant with specified name in this unit system.
Parameters
----------
name : str
The name of the constant, e.g., G.
Returns
-------
const : float
The value of the constant represented in this unit system.
Examples
--------
>>> usys = UnitSystem(u.kpc, u.Myr, u.radian, u.Msun)
>>> usys.get_constant('c')
306.6013937879527
|
[
"Retrieve",
"a",
"constant",
"with",
"specified",
"name",
"in",
"this",
"unit",
"system",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/units.py#L160-L187
|
train
|
adrn/gala
|
gala/util.py
|
atleast_2d
|
def atleast_2d(*arys, **kwargs):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
insert_axis : int (optional)
Where to create a new axis if input array(s) have <2 dim.
Returns
-------
res, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
Examples
--------
>>> atleast_2d(3.0) # doctest: +FLOAT_CMP
array([[3.]])
>>> x = np.arange(3.0)
>>> atleast_2d(x) # doctest: +FLOAT_CMP
array([[0., 1., 2.]])
>>> atleast_2d(x, insert_axis=-1) # doctest: +FLOAT_CMP
array([[0.],
[1.],
[2.]])
>>> atleast_2d(x).base is x
True
>>> atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
insert_axis = kwargs.pop('insert_axis', 0)
slc = [slice(None)]*2
slc[insert_axis] = None
slc = tuple(slc)
res = []
for ary in arys:
ary = np.asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1, 1)
elif len(ary.shape) == 1:
result = ary[slc]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
|
python
|
def atleast_2d(*arys, **kwargs):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
insert_axis : int (optional)
Where to create a new axis if input array(s) have <2 dim.
Returns
-------
res, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
Examples
--------
>>> atleast_2d(3.0) # doctest: +FLOAT_CMP
array([[3.]])
>>> x = np.arange(3.0)
>>> atleast_2d(x) # doctest: +FLOAT_CMP
array([[0., 1., 2.]])
>>> atleast_2d(x, insert_axis=-1) # doctest: +FLOAT_CMP
array([[0.],
[1.],
[2.]])
>>> atleast_2d(x).base is x
True
>>> atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
insert_axis = kwargs.pop('insert_axis', 0)
slc = [slice(None)]*2
slc[insert_axis] = None
slc = tuple(slc)
res = []
for ary in arys:
ary = np.asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1, 1)
elif len(ary.shape) == 1:
result = ary[slc]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
|
[
"def",
"atleast_2d",
"(",
"*",
"arys",
",",
"*",
"*",
"kwargs",
")",
":",
"insert_axis",
"=",
"kwargs",
".",
"pop",
"(",
"'insert_axis'",
",",
"0",
")",
"slc",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"2",
"slc",
"[",
"insert_axis",
"]",
"=",
"None",
"slc",
"=",
"tuple",
"(",
"slc",
")",
"res",
"=",
"[",
"]",
"for",
"ary",
"in",
"arys",
":",
"ary",
"=",
"np",
".",
"asanyarray",
"(",
"ary",
")",
"if",
"len",
"(",
"ary",
".",
"shape",
")",
"==",
"0",
":",
"result",
"=",
"ary",
".",
"reshape",
"(",
"1",
",",
"1",
")",
"elif",
"len",
"(",
"ary",
".",
"shape",
")",
"==",
"1",
":",
"result",
"=",
"ary",
"[",
"slc",
"]",
"else",
":",
"result",
"=",
"ary",
"res",
".",
"append",
"(",
"result",
")",
"if",
"len",
"(",
"res",
")",
"==",
"1",
":",
"return",
"res",
"[",
"0",
"]",
"else",
":",
"return",
"res"
] |
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
insert_axis : int (optional)
Where to create a new axis if input array(s) have <2 dim.
Returns
-------
res, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
Examples
--------
>>> atleast_2d(3.0) # doctest: +FLOAT_CMP
array([[3.]])
>>> x = np.arange(3.0)
>>> atleast_2d(x) # doctest: +FLOAT_CMP
array([[0., 1., 2.]])
>>> atleast_2d(x, insert_axis=-1) # doctest: +FLOAT_CMP
array([[0.],
[1.],
[2.]])
>>> atleast_2d(x).base is x
True
>>> atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
|
[
"View",
"inputs",
"as",
"arrays",
"with",
"at",
"least",
"two",
"dimensions",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/util.py#L113-L170
|
train
|
adrn/gala
|
gala/coordinates/quaternion.py
|
Quaternion.from_v_theta
|
def from_v_theta(cls, v, theta):
"""
Create a quaternion from unit vector v and rotation angle theta.
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A ``Quaternion`` instance.
"""
theta = np.asarray(theta)
v = np.asarray(v)
s = np.sin(0.5 * theta)
c = np.cos(0.5 * theta)
vnrm = np.sqrt(np.sum(v * v))
q = np.concatenate([[c], s * v / vnrm])
return cls(q)
|
python
|
def from_v_theta(cls, v, theta):
"""
Create a quaternion from unit vector v and rotation angle theta.
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A ``Quaternion`` instance.
"""
theta = np.asarray(theta)
v = np.asarray(v)
s = np.sin(0.5 * theta)
c = np.cos(0.5 * theta)
vnrm = np.sqrt(np.sum(v * v))
q = np.concatenate([[c], s * v / vnrm])
return cls(q)
|
[
"def",
"from_v_theta",
"(",
"cls",
",",
"v",
",",
"theta",
")",
":",
"theta",
"=",
"np",
".",
"asarray",
"(",
"theta",
")",
"v",
"=",
"np",
".",
"asarray",
"(",
"v",
")",
"s",
"=",
"np",
".",
"sin",
"(",
"0.5",
"*",
"theta",
")",
"c",
"=",
"np",
".",
"cos",
"(",
"0.5",
"*",
"theta",
")",
"vnrm",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"v",
"*",
"v",
")",
")",
"q",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"c",
"]",
",",
"s",
"*",
"v",
"/",
"vnrm",
"]",
")",
"return",
"cls",
"(",
"q",
")"
] |
Create a quaternion from unit vector v and rotation angle theta.
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A ``Quaternion`` instance.
|
[
"Create",
"a",
"quaternion",
"from",
"unit",
"vector",
"v",
"and",
"rotation",
"angle",
"theta",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/quaternion.py#L27-L45
|
train
|
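A short sketch of from_v_theta, assuming Quaternion is importable from gala.coordinates at this revision (as the docstring's class reference suggests).

import numpy as np
from gala.coordinates import Quaternion

# A 90-degree rotation about z: from the formula above the components are
# (w, x, y, z) = (cos(pi/4), 0, 0, sin(pi/4)) ~= (0.707, 0, 0, 0.707).
q = Quaternion.from_v_theta([0.0, 0.0, 1.0], np.pi / 2)
print(q)
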
adrn/gala
|
gala/coordinates/quaternion.py
|
Quaternion.random
|
def random(cls):
"""
Randomly sample a Quaternion from a distribution uniform in
3D rotation angles.
https://www-preview.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A randomly sampled ``Quaternion`` instance.
"""
s = np.random.uniform()
s1 = np.sqrt(1 - s)
s2 = np.sqrt(s)
t1 = np.random.uniform(0, 2*np.pi)
t2 = np.random.uniform(0, 2*np.pi)
w = np.cos(t2)*s2
x = np.sin(t1)*s1
y = np.cos(t1)*s1
z = np.sin(t2)*s2
return cls([w,x,y,z])
|
python
|
def random(cls):
"""
Randomly sample a Quaternion from a distribution uniform in
3D rotation angles.
https://www-preview.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A randomly sampled ``Quaternion`` instance.
"""
s = np.random.uniform()
s1 = np.sqrt(1 - s)
s2 = np.sqrt(s)
t1 = np.random.uniform(0, 2*np.pi)
t2 = np.random.uniform(0, 2*np.pi)
w = np.cos(t2)*s2
x = np.sin(t1)*s1
y = np.cos(t1)*s1
z = np.sin(t2)*s2
return cls([w,x,y,z])
|
[
"def",
"random",
"(",
"cls",
")",
":",
"s",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
")",
"s1",
"=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"s",
")",
"s2",
"=",
"np",
".",
"sqrt",
"(",
"s",
")",
"t1",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
")",
"t2",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
")",
"w",
"=",
"np",
".",
"cos",
"(",
"t2",
")",
"*",
"s2",
"x",
"=",
"np",
".",
"sin",
"(",
"t1",
")",
"*",
"s1",
"y",
"=",
"np",
".",
"cos",
"(",
"t1",
")",
"*",
"s1",
"z",
"=",
"np",
".",
"sin",
"(",
"t2",
")",
"*",
"s2",
"return",
"cls",
"(",
"[",
"w",
",",
"x",
",",
"y",
",",
"z",
"]",
")"
] |
Randomly sample a Quaternion from a distribution uniform in
3D rotation angles.
https://www-preview.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf
Returns
-------
q : :class:`gala.coordinates.Quaternion`
A randomly sampled ``Quaternion`` instance.
|
[
"Randomly",
"sample",
"a",
"Quaternion",
"from",
"a",
"distribution",
"uniform",
"in",
"3D",
"rotation",
"angles",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/quaternion.py#L112-L137
|
train
|
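The sampling formula above can be checked in isolation: the sketch below restates it with plain NumPy (no gala import needed) and verifies that the sampled quaternions are unit-normalized, as required for uniform random rotations.

import numpy as np

rng = np.random.default_rng(42)

def random_unit_quaternion():
    # Same construction as Quaternion.random above: s, t1, t2 drawn uniformly.
    s = rng.uniform()
    s1, s2 = np.sqrt(1.0 - s), np.sqrt(s)
    t1, t2 = rng.uniform(0.0, 2.0 * np.pi, size=2)
    return np.array([np.cos(t2) * s2, np.sin(t1) * s1,
                     np.cos(t1) * s1, np.sin(t2) * s2])

samples = np.array([random_unit_quaternion() for _ in range(1000)])
# w^2 + x^2 + y^2 + z^2 = s2^2 + s1^2 = s + (1 - s) = 1 for every sample.
print(np.allclose(np.linalg.norm(samples, axis=1), 1.0))   # True
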
adrn/gala
|
gala/integrate/pyintegrators/leapfrog.py
|
LeapfrogIntegrator.step
|
def step(self, t, x_im1, v_im1_2, dt):
"""
Step forward the positions and velocities by the given timestep.
Parameters
----------
dt : numeric
The timestep to move forward.
"""
x_i = x_im1 + v_im1_2 * dt
F_i = self.F(t, np.vstack((x_i, v_im1_2)), *self._func_args)
a_i = F_i[self.ndim:]
v_i = v_im1_2 + a_i * dt / 2
v_ip1_2 = v_i + a_i * dt / 2
return x_i, v_i, v_ip1_2
|
python
|
def step(self, t, x_im1, v_im1_2, dt):
"""
Step forward the positions and velocities by the given timestep.
Parameters
----------
dt : numeric
The timestep to move forward.
"""
x_i = x_im1 + v_im1_2 * dt
F_i = self.F(t, np.vstack((x_i, v_im1_2)), *self._func_args)
a_i = F_i[self.ndim:]
v_i = v_im1_2 + a_i * dt / 2
v_ip1_2 = v_i + a_i * dt / 2
return x_i, v_i, v_ip1_2
|
[
"def",
"step",
"(",
"self",
",",
"t",
",",
"x_im1",
",",
"v_im1_2",
",",
"dt",
")",
":",
"x_i",
"=",
"x_im1",
"+",
"v_im1_2",
"*",
"dt",
"F_i",
"=",
"self",
".",
"F",
"(",
"t",
",",
"np",
".",
"vstack",
"(",
"(",
"x_i",
",",
"v_im1_2",
")",
")",
",",
"*",
"self",
".",
"_func_args",
")",
"a_i",
"=",
"F_i",
"[",
"self",
".",
"ndim",
":",
"]",
"v_i",
"=",
"v_im1_2",
"+",
"a_i",
"*",
"dt",
"/",
"2",
"v_ip1_2",
"=",
"v_i",
"+",
"a_i",
"*",
"dt",
"/",
"2",
"return",
"x_i",
",",
"v_i",
",",
"v_ip1_2"
] |
Step forward the positions and velocities by the given timestep.
Parameters
----------
dt : numeric
The timestep to move forward.
|
[
"Step",
"forward",
"the",
"positions",
"and",
"velocities",
"by",
"the",
"given",
"timestep",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/integrate/pyintegrators/leapfrog.py#L93-L110
|
train
|
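A sketch of how the step above is driven through the public integrator class, assuming LeapfrogIntegrator is importable from gala.integrate and that run(w0, dt=..., n_steps=...) is the entry point (as in the gala documentation of this era); the return value is treated as an opaque orbit object here.

import numpy as np
from gala.integrate import LeapfrogIntegrator

def F(t, w):
    # w stacks (q, p) along axis 0 for a 1D harmonic oscillator:
    # dq/dt = p, dp/dt = -q.
    q, p = w[:1], w[1:]
    return np.vstack((p, -q))

integrator = LeapfrogIntegrator(F)
orbit = integrator.run(np.array([1.0, 0.0]), dt=0.01, n_steps=1000)
# Leapfrog is symplectic, so the energy 0.5*(q**2 + p**2) along the returned
# orbit should oscillate tightly around 0.5 rather than drift.
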
adrn/gala
|
gala/dynamics/actionangle.py
|
fit_isochrone
|
def fit_isochrone(orbit, m0=2E11, b0=1., minimize_kwargs=None):
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
"""
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(p, w):
logm, logb = p
potential = IsochronePotential(m=np.exp(logm), b=np.exp(logb),
units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
logm0 = np.log(m0)
logb0 = np.log(b0)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = np.array([logm0, logb0])
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
logm, logb = np.abs(res.x)
m = np.exp(logm)
b = np.exp(logb)
return IsochronePotential(m=m, b=b, units=pot.units)
|
python
|
def fit_isochrone(orbit, m0=2E11, b0=1., minimize_kwargs=None):
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
"""
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(p, w):
logm, logb = p
potential = IsochronePotential(m=np.exp(logm), b=np.exp(logb),
units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
logm0 = np.log(m0)
logb0 = np.log(b0)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = np.array([logm0, logb0])
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
logm, logb = np.abs(res.x)
m = np.exp(logm)
b = np.exp(logb)
return IsochronePotential(m=m, b=b, units=pot.units)
|
[
"def",
"fit_isochrone",
"(",
"orbit",
",",
"m0",
"=",
"2E11",
",",
"b0",
"=",
"1.",
",",
"minimize_kwargs",
"=",
"None",
")",
":",
"pot",
"=",
"orbit",
".",
"hamiltonian",
".",
"potential",
"if",
"pot",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"The orbit object must have an associated potential\"",
")",
"w",
"=",
"np",
".",
"squeeze",
"(",
"orbit",
".",
"w",
"(",
"pot",
".",
"units",
")",
")",
"if",
"w",
".",
"ndim",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Input orbit object must be a single orbit.\"",
")",
"def",
"f",
"(",
"p",
",",
"w",
")",
":",
"logm",
",",
"logb",
"=",
"p",
"potential",
"=",
"IsochronePotential",
"(",
"m",
"=",
"np",
".",
"exp",
"(",
"logm",
")",
",",
"b",
"=",
"np",
".",
"exp",
"(",
"logb",
")",
",",
"units",
"=",
"pot",
".",
"units",
")",
"H",
"=",
"(",
"potential",
".",
"value",
"(",
"w",
"[",
":",
"3",
"]",
")",
".",
"decompose",
"(",
"pot",
".",
"units",
")",
".",
"value",
"+",
"0.5",
"*",
"np",
".",
"sum",
"(",
"w",
"[",
"3",
":",
"]",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"squeeze",
"(",
"H",
"-",
"np",
".",
"mean",
"(",
"H",
")",
")",
"**",
"2",
")",
"logm0",
"=",
"np",
".",
"log",
"(",
"m0",
")",
"logb0",
"=",
"np",
".",
"log",
"(",
"b0",
")",
"if",
"minimize_kwargs",
"is",
"None",
":",
"minimize_kwargs",
"=",
"dict",
"(",
")",
"minimize_kwargs",
"[",
"'x0'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"logm0",
",",
"logb0",
"]",
")",
"minimize_kwargs",
"[",
"'method'",
"]",
"=",
"minimize_kwargs",
".",
"get",
"(",
"'method'",
",",
"'Nelder-Mead'",
")",
"res",
"=",
"minimize",
"(",
"f",
",",
"args",
"=",
"(",
"w",
",",
")",
",",
"*",
"*",
"minimize_kwargs",
")",
"if",
"not",
"res",
".",
"success",
":",
"raise",
"ValueError",
"(",
"\"Failed to fit toy potential to orbit.\"",
")",
"logm",
",",
"logb",
"=",
"np",
".",
"abs",
"(",
"res",
".",
"x",
")",
"m",
"=",
"np",
".",
"exp",
"(",
"logm",
")",
"b",
"=",
"np",
".",
"exp",
"(",
"logb",
")",
"return",
"IsochronePotential",
"(",
"m",
"=",
"m",
",",
"b",
"=",
"b",
",",
"units",
"=",
"pot",
".",
"units",
")"
] |
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
|
[
"r",
"Fit",
"the",
"toy",
"Isochrone",
"potential",
"to",
"the",
"sum",
"of",
"the",
"energy",
"residuals",
"relative",
"to",
"the",
"mean",
"energy",
"by",
"minimizing",
"the",
"function"
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L78-L140
|
train
|
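A usage sketch for fit_isochrone: integrate an orbit in a known isochrone potential and recover its parameters. Note that the code in this record actually returns an IsochronePotential instance rather than the bare m, b listed under Returns. The import path follows this record; the potential and orbit API names are assumed from the usual gala interface.

import astropy.units as u
import gala.potential as gp
import gala.dynamics as gd
from gala.units import galactic
from gala.dynamics.actionangle import fit_isochrone

true_pot = gp.IsochronePotential(m=1.1e11 * u.Msun, b=1.2 * u.kpc, units=galactic)
w0 = gd.PhaseSpacePosition(pos=[10.0, 0.0, 0.0] * u.kpc,
                           vel=[0.0, 170.0, 20.0] * u.km / u.s)
orbit = gp.Hamiltonian(true_pot).integrate_orbit(w0, dt=1.0 * u.Myr, n_steps=5000)

toy = fit_isochrone(orbit)
print(toy.parameters['m'], toy.parameters['b'])   # should be close to the true values
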
adrn/gala
|
gala/dynamics/actionangle.py
|
fit_harmonic_oscillator
|
def fit_harmonic_oscillator(orbit, omega0=[1., 1, 1], minimize_kwargs=None):
r"""
Fit the toy harmonic oscillator potential to the sum of the energy
residuals relative to the mean energy by minimizing the function
.. math::
f(\boldsymbol{\omega}) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm sho}(x_i\,|\,\boldsymbol{\omega}) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
omega0 : array_like (optional)
Initial frequency guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
omegas : float
Best-fit harmonic oscillator frequencies.
"""
omega0 = np.atleast_1d(omega0)
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(omega, w):
potential = HarmonicOscillatorPotential(omega=omega, units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = omega0
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
best_omega = np.abs(res.x)
return HarmonicOscillatorPotential(omega=best_omega, units=pot.units)
|
python
|
def fit_harmonic_oscillator(orbit, omega0=[1., 1, 1], minimize_kwargs=None):
r"""
Fit the toy harmonic oscillator potential to the sum of the energy
residuals relative to the mean energy by minimizing the function
.. math::
f(\boldsymbol{\omega}) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm sho}(x_i\,|\,\boldsymbol{\omega}) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
omega0 : array_like (optional)
Initial frequency guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
omegas : float
Best-fit harmonic oscillator frequencies.
"""
omega0 = np.atleast_1d(omega0)
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(omega, w):
potential = HarmonicOscillatorPotential(omega=omega, units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = omega0
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
best_omega = np.abs(res.x)
return HarmonicOscillatorPotential(omega=best_omega, units=pot.units)
|
[
"def",
"fit_harmonic_oscillator",
"(",
"orbit",
",",
"omega0",
"=",
"[",
"1.",
",",
"1",
",",
"1",
"]",
",",
"minimize_kwargs",
"=",
"None",
")",
":",
"omega0",
"=",
"np",
".",
"atleast_1d",
"(",
"omega0",
")",
"pot",
"=",
"orbit",
".",
"hamiltonian",
".",
"potential",
"if",
"pot",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"The orbit object must have an associated potential\"",
")",
"w",
"=",
"np",
".",
"squeeze",
"(",
"orbit",
".",
"w",
"(",
"pot",
".",
"units",
")",
")",
"if",
"w",
".",
"ndim",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Input orbit object must be a single orbit.\"",
")",
"def",
"f",
"(",
"omega",
",",
"w",
")",
":",
"potential",
"=",
"HarmonicOscillatorPotential",
"(",
"omega",
"=",
"omega",
",",
"units",
"=",
"pot",
".",
"units",
")",
"H",
"=",
"(",
"potential",
".",
"value",
"(",
"w",
"[",
":",
"3",
"]",
")",
".",
"decompose",
"(",
"pot",
".",
"units",
")",
".",
"value",
"+",
"0.5",
"*",
"np",
".",
"sum",
"(",
"w",
"[",
"3",
":",
"]",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"squeeze",
"(",
"H",
"-",
"np",
".",
"mean",
"(",
"H",
")",
")",
"**",
"2",
")",
"if",
"minimize_kwargs",
"is",
"None",
":",
"minimize_kwargs",
"=",
"dict",
"(",
")",
"minimize_kwargs",
"[",
"'x0'",
"]",
"=",
"omega0",
"minimize_kwargs",
"[",
"'method'",
"]",
"=",
"minimize_kwargs",
".",
"get",
"(",
"'method'",
",",
"'Nelder-Mead'",
")",
"res",
"=",
"minimize",
"(",
"f",
",",
"args",
"=",
"(",
"w",
",",
")",
",",
"*",
"*",
"minimize_kwargs",
")",
"if",
"not",
"res",
".",
"success",
":",
"raise",
"ValueError",
"(",
"\"Failed to fit toy potential to orbit.\"",
")",
"best_omega",
"=",
"np",
".",
"abs",
"(",
"res",
".",
"x",
")",
"return",
"HarmonicOscillatorPotential",
"(",
"omega",
"=",
"best_omega",
",",
"units",
"=",
"pot",
".",
"units",
")"
] |
r"""
Fit the toy harmonic oscillator potential to the sum of the energy
residuals relative to the mean energy by minimizing the function
.. math::
f(\boldsymbol{\omega}) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm sho}(x_i\,|\,\boldsymbol{\omega}) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
omega0 : array_like (optional)
Initial frequency guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
omegas : float
Best-fit harmonic oscillator frequencies.
|
[
"r",
"Fit",
"the",
"toy",
"harmonic",
"oscillator",
"potential",
"to",
"the",
"sum",
"of",
"the",
"energy",
"residuals",
"relative",
"to",
"the",
"mean",
"energy",
"by",
"minimizing",
"the",
"function"
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L142-L194
|
train
|
adrn/gala
|
gala/dynamics/actionangle.py
|
check_angle_sampling
|
def check_angle_sampling(nvecs, angles):
"""
Returns a list of the index of elements of n which do not have adequate
toy angle coverage. The criterion is that we must have at least one sample
in each Nyquist box when we project the toy angles along the vector n.
Parameters
----------
nvecs : array_like
Array of integer vectors.
angles : array_like
Array of angles.
Returns
-------
failed_nvecs : :class:`numpy.ndarray`
Array of all integer vectors that failed checks. Has shape (N,3).
failures : :class:`numpy.ndarray`
Array of flags that designate whether this failed needing a longer
integration window (0) or finer sampling (1).
"""
failed_nvecs = []
failures = []
for i, vec in enumerate(nvecs):
# N = np.linalg.norm(vec)
# X = np.dot(angles,vec)
X = (angles*vec[:, None]).sum(axis=0)
diff = float(np.abs(X.max() - X.min()))
if diff < (2.*np.pi):
warnings.warn("Need a longer integration window for mode {0}"
.format(vec))
failed_nvecs.append(vec.tolist())
# P.append(2.*np.pi - diff)
failures.append(0)
elif (diff/len(X)) > np.pi:
warnings.warn("Need a finer sampling for mode {0}"
.format(str(vec)))
failed_nvecs.append(vec.tolist())
# P.append(np.pi - diff/len(X))
failures.append(1)
return np.array(failed_nvecs), np.array(failures)
|
python
|
def check_angle_sampling(nvecs, angles):
"""
Returns a list of the index of elements of n which do not have adequate
toy angle coverage. The criterion is that we must have at least one sample
in each Nyquist box when we project the toy angles along the vector n.
Parameters
----------
nvecs : array_like
Array of integer vectors.
angles : array_like
Array of angles.
Returns
-------
failed_nvecs : :class:`numpy.ndarray`
Array of all integer vectors that failed checks. Has shape (N,3).
failures : :class:`numpy.ndarray`
Array of flags that designate whether this failed needing a longer
integration window (0) or finer sampling (1).
"""
failed_nvecs = []
failures = []
for i, vec in enumerate(nvecs):
# N = np.linalg.norm(vec)
# X = np.dot(angles,vec)
X = (angles*vec[:, None]).sum(axis=0)
diff = float(np.abs(X.max() - X.min()))
if diff < (2.*np.pi):
warnings.warn("Need a longer integration window for mode {0}"
.format(vec))
failed_nvecs.append(vec.tolist())
# P.append(2.*np.pi - diff)
failures.append(0)
elif (diff/len(X)) > np.pi:
warnings.warn("Need a finer sampling for mode {0}"
.format(str(vec)))
failed_nvecs.append(vec.tolist())
# P.append(np.pi - diff/len(X))
failures.append(1)
return np.array(failed_nvecs), np.array(failures)
|
[
"def",
"check_angle_sampling",
"(",
"nvecs",
",",
"angles",
")",
":",
"failed_nvecs",
"=",
"[",
"]",
"failures",
"=",
"[",
"]",
"for",
"i",
",",
"vec",
"in",
"enumerate",
"(",
"nvecs",
")",
":",
"# N = np.linalg.norm(vec)",
"# X = np.dot(angles,vec)",
"X",
"=",
"(",
"angles",
"*",
"vec",
"[",
":",
",",
"None",
"]",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"diff",
"=",
"float",
"(",
"np",
".",
"abs",
"(",
"X",
".",
"max",
"(",
")",
"-",
"X",
".",
"min",
"(",
")",
")",
")",
"if",
"diff",
"<",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Need a longer integration window for mode {0}\"",
".",
"format",
"(",
"vec",
")",
")",
"failed_nvecs",
".",
"append",
"(",
"vec",
".",
"tolist",
"(",
")",
")",
"# P.append(2.*np.pi - diff)",
"failures",
".",
"append",
"(",
"0",
")",
"elif",
"(",
"diff",
"/",
"len",
"(",
"X",
")",
")",
">",
"np",
".",
"pi",
":",
"warnings",
".",
"warn",
"(",
"\"Need a finer sampling for mode {0}\"",
".",
"format",
"(",
"str",
"(",
"vec",
")",
")",
")",
"failed_nvecs",
".",
"append",
"(",
"vec",
".",
"tolist",
"(",
")",
")",
"# P.append(np.pi - diff/len(X))",
"failures",
".",
"append",
"(",
"1",
")",
"return",
"np",
".",
"array",
"(",
"failed_nvecs",
")",
",",
"np",
".",
"array",
"(",
"failures",
")"
] |
Returns a list of the index of elements of n which do not have adequate
toy angle coverage. The criterion is that we must have at least one sample
in each Nyquist box when we project the toy angles along the vector n.
Parameters
----------
nvecs : array_like
Array of integer vectors.
angles : array_like
Array of angles.
Returns
-------
failed_nvecs : :class:`numpy.ndarray`
Array of all integer vectors that failed checks. Has shape (N,3).
failures : :class:`numpy.ndarray`
Array of flags that designate whether this failed needing a longer
integration window (0) or finer sampling (1).
|
[
"Returns",
"a",
"list",
"of",
"the",
"index",
"of",
"elements",
"of",
"n",
"which",
"do",
"not",
"have",
"adequate",
"toy",
"angle",
"coverage",
".",
"The",
"criterion",
"is",
"that",
"we",
"must",
"have",
"at",
"least",
"one",
"sample",
"in",
"each",
"Nyquist",
"box",
"when",
"we",
"project",
"the",
"toy",
"angles",
"along",
"the",
"vector",
"n",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L237-L283
|
train
|
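check_angle_sampling acts on plain NumPy arrays, so a self-contained sketch is straightforward; the import path below follows this record's module. The third angle coordinate is deliberately under-covered so the "longer integration window" branch fires.

import numpy as np
from gala.dynamics.actionangle import check_angle_sampling

# Three toy angle coordinates sampled at 256 times (shape (3, ntimes)).
t = np.linspace(0.0, 8.0 * np.pi, 256)
angles = np.vstack([t, 0.7 * t, 0.2 * t])   # the last one spans < 2*pi

nvecs = np.array([[1, 0, 0], [0, 0, 1]])
failed, flags = check_angle_sampling(nvecs, angles)
print(failed)   # [[0 0 1]] -- only the under-covered mode fails
print(flags)    # [0]       -- flag 0: needs a longer integration window
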
adrn/gala
|
gala/dynamics/actionangle.py
|
find_actions
|
def find_actions(orbit, N_max, force_harmonic_oscillator=False, toy_potential=None):
r"""
Find approximate actions and angles for samples of a phase-space orbit.
Uses toy potentials with known, analytic action-angle transformations to
approximate the true coordinates as a Fourier sum.
This code is adapted from Jason Sanders'
`genfunc <https://github.com/jlsanders/genfunc>`_
Parameters
----------
orbit : `~gala.dynamics.Orbit`
N_max : int
Maximum integer Fourier mode vector length, :math:`|\boldsymbol{n}|`.
force_harmonic_oscillator : bool (optional)
Force using the harmonic oscillator potential as the toy potential.
toy_potential : Potential (optional)
Fix the toy potential class.
Returns
-------
aaf : dict
A Python dictionary containing the actions, angles, frequencies, and
value of the generating function and derivatives for each integer
vector. Each value of the dictionary is a :class:`numpy.ndarray` or
:class:`astropy.units.Quantity`.
"""
if orbit.norbits == 1:
return _single_orbit_find_actions(
orbit, N_max,
force_harmonic_oscillator=force_harmonic_oscillator,
toy_potential=toy_potential)
else:
norbits = orbit.norbits
actions = np.zeros((3, norbits))
angles = np.zeros((3, norbits))
freqs = np.zeros((3, norbits))
for n in range(norbits):
aaf = _single_orbit_find_actions(
orbit[:, n], N_max,
force_harmonic_oscillator=force_harmonic_oscillator,
toy_potential=toy_potential)
actions[n] = aaf['actions'].value
angles[n] = aaf['angles'].value
freqs[n] = aaf['freqs'].value
return dict(actions=actions*aaf['actions'].unit,
angles=angles*aaf['angles'].unit,
freqs=freqs*aaf['freqs'].unit,
Sn=actions[3:], dSn=angles[6:], nvecs=aaf['nvecs'])
|
python
|
def find_actions(orbit, N_max, force_harmonic_oscillator=False, toy_potential=None):
r"""
Find approximate actions and angles for samples of a phase-space orbit.
Uses toy potentials with known, analytic action-angle transformations to
approximate the true coordinates as a Fourier sum.
This code is adapted from Jason Sanders'
`genfunc <https://github.com/jlsanders/genfunc>`_
Parameters
----------
orbit : `~gala.dynamics.Orbit`
N_max : int
Maximum integer Fourier mode vector length, :math:`|\boldsymbol{n}|`.
force_harmonic_oscillator : bool (optional)
Force using the harmonic oscillator potential as the toy potential.
toy_potential : Potential (optional)
Fix the toy potential class.
Returns
-------
aaf : dict
A Python dictionary containing the actions, angles, frequencies, and
value of the generating function and derivatives for each integer
vector. Each value of the dictionary is a :class:`numpy.ndarray` or
:class:`astropy.units.Quantity`.
"""
if orbit.norbits == 1:
return _single_orbit_find_actions(
orbit, N_max,
force_harmonic_oscillator=force_harmonic_oscillator,
toy_potential=toy_potential)
else:
norbits = orbit.norbits
actions = np.zeros((3, norbits))
angles = np.zeros((3, norbits))
freqs = np.zeros((3, norbits))
for n in range(norbits):
aaf = _single_orbit_find_actions(
orbit[:, n], N_max,
force_harmonic_oscillator=force_harmonic_oscillator,
toy_potential=toy_potential)
actions[n] = aaf['actions'].value
angles[n] = aaf['angles'].value
freqs[n] = aaf['freqs'].value
return dict(actions=actions*aaf['actions'].unit,
angles=angles*aaf['angles'].unit,
freqs=freqs*aaf['freqs'].unit,
Sn=actions[3:], dSn=angles[6:], nvecs=aaf['nvecs'])
|
[
"def",
"find_actions",
"(",
"orbit",
",",
"N_max",
",",
"force_harmonic_oscillator",
"=",
"False",
",",
"toy_potential",
"=",
"None",
")",
":",
"if",
"orbit",
".",
"norbits",
"==",
"1",
":",
"return",
"_single_orbit_find_actions",
"(",
"orbit",
",",
"N_max",
",",
"force_harmonic_oscillator",
"=",
"force_harmonic_oscillator",
",",
"toy_potential",
"=",
"toy_potential",
")",
"else",
":",
"norbits",
"=",
"orbit",
".",
"norbits",
"actions",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"norbits",
")",
")",
"angles",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"norbits",
")",
")",
"freqs",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"norbits",
")",
")",
"for",
"n",
"in",
"range",
"(",
"norbits",
")",
":",
"aaf",
"=",
"_single_orbit_find_actions",
"(",
"orbit",
"[",
":",
",",
"n",
"]",
",",
"N_max",
",",
"force_harmonic_oscillator",
"=",
"force_harmonic_oscillator",
",",
"toy_potential",
"=",
"toy_potential",
")",
"actions",
"[",
"n",
"]",
"=",
"aaf",
"[",
"'actions'",
"]",
".",
"value",
"angles",
"[",
"n",
"]",
"=",
"aaf",
"[",
"'angles'",
"]",
".",
"value",
"freqs",
"[",
"n",
"]",
"=",
"aaf",
"[",
"'freqs'",
"]",
".",
"value",
"return",
"dict",
"(",
"actions",
"=",
"actions",
"*",
"aaf",
"[",
"'actions'",
"]",
".",
"unit",
",",
"angles",
"=",
"angles",
"*",
"aaf",
"[",
"'angles'",
"]",
".",
"unit",
",",
"freqs",
"=",
"freqs",
"*",
"aaf",
"[",
"'freqs'",
"]",
".",
"unit",
",",
"Sn",
"=",
"actions",
"[",
"3",
":",
"]",
",",
"dSn",
"=",
"angles",
"[",
"6",
":",
"]",
",",
"nvecs",
"=",
"aaf",
"[",
"'nvecs'",
"]",
")"
] |
r"""
Find approximate actions and angles for samples of a phase-space orbit.
Uses toy potentials with known, analytic action-angle transformations to
approximate the true coordinates as a Fourier sum.
This code is adapted from Jason Sanders'
`genfunc <https://github.com/jlsanders/genfunc>`_
Parameters
----------
orbit : `~gala.dynamics.Orbit`
N_max : int
Maximum integer Fourier mode vector length, :math:`|\boldsymbol{n}|`.
force_harmonic_oscillator : bool (optional)
Force using the harmonic oscillator potential as the toy potential.
toy_potential : Potential (optional)
Fix the toy potential class.
Returns
-------
aaf : dict
A Python dictionary containing the actions, angles, frequencies, and
value of the generating function and derivatives for each integer
vector. Each value of the dictionary is a :class:`numpy.ndarray` or
:class:`astropy.units.Quantity`.
|
[
"r",
"Find",
"approximate",
"actions",
"and",
"angles",
"for",
"samples",
"of",
"a",
"phase",
"-",
"space",
"orbit",
".",
"Uses",
"toy",
"potentials",
"with",
"known",
"analytic",
"action",
"-",
"angle",
"transformations",
"to",
"approximate",
"the",
"true",
"coordinates",
"as",
"a",
"Fourier",
"sum",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/actionangle.py#L539-L591
|
train
|
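A usage sketch for find_actions on a regular orbit, importing from this record's module path; for an isochrone orbit the recovered actions and frequencies could be compared against the analytic solution, though here the output is simply printed. API names outside this record are assumptions.

import astropy.units as u
import gala.potential as gp
import gala.dynamics as gd
from gala.units import galactic
from gala.dynamics.actionangle import find_actions

pot = gp.IsochronePotential(m=1e11 * u.Msun, b=1.0 * u.kpc, units=galactic)
w0 = gd.PhaseSpacePosition(pos=[10.0, 1.0, 0.5] * u.kpc,
                           vel=[20.0, 160.0, 25.0] * u.km / u.s)
orbit = gp.Hamiltonian(pot).integrate_orbit(w0, dt=1.0 * u.Myr, n_steps=20000)

aaf = find_actions(orbit, N_max=6)
print(aaf['actions'])
print(aaf['freqs'])
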
adrn/gala
|
gala/dynamics/_genfunc/toy_potentials.py
|
angact_ho
|
def angact_ho(x,omega):
""" Calculate angle and action variable in sho potential with
parameter omega """
action = (x[3:]**2+(omega*x[:3])**2)/(2.*omega)
angle = np.array([np.arctan(-x[3+i]/omega[i]/x[i]) if x[i]!=0. else -np.sign(x[3+i])*np.pi/2. for i in range(3)])
for i in range(3):
if(x[i]<0):
angle[i]+=np.pi
return np.concatenate((action,angle % (2.*np.pi)))
|
python
|
def angact_ho(x,omega):
""" Calculate angle and action variable in sho potential with
parameter omega """
action = (x[3:]**2+(omega*x[:3])**2)/(2.*omega)
angle = np.array([np.arctan(-x[3+i]/omega[i]/x[i]) if x[i]!=0. else -np.sign(x[3+i])*np.pi/2. for i in range(3)])
for i in range(3):
if(x[i]<0):
angle[i]+=np.pi
return np.concatenate((action,angle % (2.*np.pi)))
|
[
"def",
"angact_ho",
"(",
"x",
",",
"omega",
")",
":",
"action",
"=",
"(",
"x",
"[",
"3",
":",
"]",
"**",
"2",
"+",
"(",
"omega",
"*",
"x",
"[",
":",
"3",
"]",
")",
"**",
"2",
")",
"/",
"(",
"2.",
"*",
"omega",
")",
"angle",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"arctan",
"(",
"-",
"x",
"[",
"3",
"+",
"i",
"]",
"/",
"omega",
"[",
"i",
"]",
"/",
"x",
"[",
"i",
"]",
")",
"if",
"x",
"[",
"i",
"]",
"!=",
"0.",
"else",
"-",
"np",
".",
"sign",
"(",
"x",
"[",
"3",
"+",
"i",
"]",
")",
"*",
"np",
".",
"pi",
"/",
"2.",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"if",
"(",
"x",
"[",
"i",
"]",
"<",
"0",
")",
":",
"angle",
"[",
"i",
"]",
"+=",
"np",
".",
"pi",
"return",
"np",
".",
"concatenate",
"(",
"(",
"action",
",",
"angle",
"%",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
")",
")"
] |
Calculate angle and action variable in sho potential with
parameter omega
|
[
"Calculate",
"angle",
"and",
"action",
"variable",
"in",
"sho",
"potential",
"with",
"parameter",
"omega"
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/_genfunc/toy_potentials.py#L18-L26
|
train
|
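For a self-contained check, the function body from the record above is restated verbatim and verified against the simple-harmonic-oscillator relation E_i = omega_i * J_i, which follows directly from the action formula shown.

import numpy as np

# Verbatim re-statement of angact_ho from the record, for a standalone check.
def angact_ho(x, omega):
    action = (x[3:]**2 + (omega * x[:3])**2) / (2. * omega)
    angle = np.array([np.arctan(-x[3+i] / omega[i] / x[i]) if x[i] != 0.
                      else -np.sign(x[3+i]) * np.pi / 2. for i in range(3)])
    for i in range(3):
        if x[i] < 0:
            angle[i] += np.pi
    return np.concatenate((action, angle % (2. * np.pi)))

omega = np.array([1., 2., 3.])
x = np.array([1.0, 0.5, 0.2, 0.0, 0.3, -0.1])   # (q1, q2, q3, p1, p2, p3)
aa = angact_ho(x, omega)
J, theta = aa[:3], aa[3:]

# For the SHO, E_i = omega_i * J_i = 0.5 * (p_i^2 + omega_i^2 * q_i^2).
E_direct = 0.5 * (x[3:]**2 + (omega * x[:3])**2)
print(np.allclose(omega * J, E_direct))   # True
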
adrn/gala
|
gala/dynamics/util.py
|
peak_to_peak_period
|
def peak_to_peak_period(t, f, amplitude_threshold=1E-2):
"""
Estimate the period of the input time series by measuring the average
peak-to-peak time.
Parameters
----------
t : array_like
Time grid aligned with the input time series.
f : array_like
A periodic time series.
amplitude_threshold : numeric (optional)
A tolerance parameter. Fails if the mean amplitude of oscillations
isn't larger than this tolerance.
Returns
-------
period : float
The mean peak-to-peak period.
"""
if hasattr(t, 'unit'):
t_unit = t.unit
t = t.value
else:
t_unit = u.dimensionless_unscaled
# find peaks
max_ix = argrelmax(f, mode='wrap')[0]
max_ix = max_ix[(max_ix != 0) & (max_ix != (len(f)-1))]
# find troughs
min_ix = argrelmin(f, mode='wrap')[0]
min_ix = min_ix[(min_ix != 0) & (min_ix != (len(f)-1))]
# neglect minor oscillations
if abs(np.mean(f[max_ix]) - np.mean(f[min_ix])) < amplitude_threshold:
return np.nan
# compute mean peak-to-peak
if len(max_ix) > 0:
T_max = np.mean(t[max_ix[1:]] - t[max_ix[:-1]])
else:
T_max = np.nan
# now compute mean trough-to-trough
if len(min_ix) > 0:
T_min = np.mean(t[min_ix[1:]] - t[min_ix[:-1]])
else:
T_min = np.nan
# then take the mean of these two
return np.mean([T_max, T_min]) * t_unit
|
python
|
def peak_to_peak_period(t, f, amplitude_threshold=1E-2):
"""
Estimate the period of the input time series by measuring the average
peak-to-peak time.
Parameters
----------
t : array_like
Time grid aligned with the input time series.
f : array_like
A periodic time series.
amplitude_threshold : numeric (optional)
A tolerance parameter. Fails if the mean amplitude of oscillations
isn't larger than this tolerance.
Returns
-------
period : float
The mean peak-to-peak period.
"""
if hasattr(t, 'unit'):
t_unit = t.unit
t = t.value
else:
t_unit = u.dimensionless_unscaled
# find peaks
max_ix = argrelmax(f, mode='wrap')[0]
max_ix = max_ix[(max_ix != 0) & (max_ix != (len(f)-1))]
# find troughs
min_ix = argrelmin(f, mode='wrap')[0]
min_ix = min_ix[(min_ix != 0) & (min_ix != (len(f)-1))]
# neglect minor oscillations
if abs(np.mean(f[max_ix]) - np.mean(f[min_ix])) < amplitude_threshold:
return np.nan
# compute mean peak-to-peak
if len(max_ix) > 0:
T_max = np.mean(t[max_ix[1:]] - t[max_ix[:-1]])
else:
T_max = np.nan
# now compute mean trough-to-trough
if len(min_ix) > 0:
T_min = np.mean(t[min_ix[1:]] - t[min_ix[:-1]])
else:
T_min = np.nan
# then take the mean of these two
return np.mean([T_max, T_min]) * t_unit
|
[
"def",
"peak_to_peak_period",
"(",
"t",
",",
"f",
",",
"amplitude_threshold",
"=",
"1E-2",
")",
":",
"if",
"hasattr",
"(",
"t",
",",
"'unit'",
")",
":",
"t_unit",
"=",
"t",
".",
"unit",
"t",
"=",
"t",
".",
"value",
"else",
":",
"t_unit",
"=",
"u",
".",
"dimensionless_unscaled",
"# find peaks",
"max_ix",
"=",
"argrelmax",
"(",
"f",
",",
"mode",
"=",
"'wrap'",
")",
"[",
"0",
"]",
"max_ix",
"=",
"max_ix",
"[",
"(",
"max_ix",
"!=",
"0",
")",
"&",
"(",
"max_ix",
"!=",
"(",
"len",
"(",
"f",
")",
"-",
"1",
")",
")",
"]",
"# find troughs",
"min_ix",
"=",
"argrelmin",
"(",
"f",
",",
"mode",
"=",
"'wrap'",
")",
"[",
"0",
"]",
"min_ix",
"=",
"min_ix",
"[",
"(",
"min_ix",
"!=",
"0",
")",
"&",
"(",
"min_ix",
"!=",
"(",
"len",
"(",
"f",
")",
"-",
"1",
")",
")",
"]",
"# neglect minor oscillations",
"if",
"abs",
"(",
"np",
".",
"mean",
"(",
"f",
"[",
"max_ix",
"]",
")",
"-",
"np",
".",
"mean",
"(",
"f",
"[",
"min_ix",
"]",
")",
")",
"<",
"amplitude_threshold",
":",
"return",
"np",
".",
"nan",
"# compute mean peak-to-peak",
"if",
"len",
"(",
"max_ix",
")",
">",
"0",
":",
"T_max",
"=",
"np",
".",
"mean",
"(",
"t",
"[",
"max_ix",
"[",
"1",
":",
"]",
"]",
"-",
"t",
"[",
"max_ix",
"[",
":",
"-",
"1",
"]",
"]",
")",
"else",
":",
"T_max",
"=",
"np",
".",
"nan",
"# now compute mean trough-to-trough",
"if",
"len",
"(",
"min_ix",
")",
">",
"0",
":",
"T_min",
"=",
"np",
".",
"mean",
"(",
"t",
"[",
"min_ix",
"[",
"1",
":",
"]",
"]",
"-",
"t",
"[",
"min_ix",
"[",
":",
"-",
"1",
"]",
"]",
")",
"else",
":",
"T_min",
"=",
"np",
".",
"nan",
"# then take the mean of these two",
"return",
"np",
".",
"mean",
"(",
"[",
"T_max",
",",
"T_min",
"]",
")",
"*",
"t_unit"
] |
Estimate the period of the input time series by measuring the average
peak-to-peak time.
Parameters
----------
t : array_like
Time grid aligned with the input time series.
f : array_like
A periodic time series.
amplitude_threshold : numeric (optional)
A tolerance parameter. Fails if the mean amplitude of oscillations
isn't larger than this tolerance.
Returns
-------
period : float
The mean peak-to-peak period.
|
[
"Estimate",
"the",
"period",
"of",
"the",
"input",
"time",
"series",
"by",
"measuring",
"the",
"average",
"peak",
"-",
"to",
"-",
"peak",
"time",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/util.py#L17-L68
|
train
|
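A sketch of peak_to_peak_period applied to a synthetic sinusoid. It assumes the function is importable from gala.dynamics.util (the path for this record) and that the module-level dependencies it relies on (numpy, astropy.units, and scipy.signal's argrelmax/argrelmin) are available; the recovered period should be close to the 12.5 Myr used to build the signal.

import numpy as np
import astropy.units as u
from gala.dynamics.util import peak_to_peak_period  # assumed import path

t = np.linspace(0.0, 100.0, 4096) * u.Myr
f = np.sin(2.0 * np.pi * t.value / 12.5)   # periodic signal with a 12.5 Myr period

T = peak_to_peak_period(t, f)
print(T)  # expected: roughly 12.5 Myr, returned as an astropy Quantity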
adrn/gala
|
gala/dynamics/util.py
|
estimate_dt_n_steps
|
def estimate_dt_n_steps(w0, hamiltonian, n_periods, n_steps_per_period,
dE_threshold=1E-9, func=np.nanmax,
**integrate_kwargs):
"""
Estimate the timestep and number of steps to integrate an orbit for
given its initial conditions and a potential object.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
potential : :class:`~gala.potential.PotentialBase`
The potential to integrate the orbit in.
n_periods : int
Number of (max) orbital periods to integrate for.
n_steps_per_period : int
Number of steps to take per (max) orbital period.
dE_threshold : numeric (optional)
Maximum fractional energy difference -- used to determine initial
timestep. Set to ``None`` to ignore this.
func : callable (optional)
Determines which period to use. By default, this takes the maximum
period using :func:`~numpy.nanmax`. Other options could be
:func:`~numpy.nanmin`, :func:`~numpy.nanmean`, :func:`~numpy.nanmedian`.
Returns
-------
dt : float
The timestep.
n_steps : int
The number of timesteps to integrate for.
"""
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
w0 = PhaseSpacePosition.from_w(w0, units=hamiltonian.units)
# integrate orbit
dt = _autodetermine_initial_dt(w0, hamiltonian, dE_threshold=dE_threshold,
**integrate_kwargs)
n_steps = int(round(10000 / dt))
orbit = hamiltonian.integrate_orbit(w0, dt=dt, n_steps=n_steps,
**integrate_kwargs)
# if loop, align circulation with Z and take R period
circ = orbit.circulation()
if np.any(circ):
orbit = orbit.align_circulation_with_z(circulation=circ)
cyl = orbit.represent_as(coord.CylindricalRepresentation)
# convert to cylindrical coordinates
R = cyl.rho.value
phi = cyl.phi.value
z = cyl.z.value
T = np.array([peak_to_peak_period(orbit.t, f).value
for f in [R, phi, z]])*orbit.t.unit
else:
T = np.array([peak_to_peak_period(orbit.t, f).value
for f in orbit.pos])*orbit.t.unit
# timestep from number of steps per period
T = func(T)
if np.isnan(T):
raise RuntimeError("Failed to find period.")
T = T.decompose(hamiltonian.units).value
dt = T / float(n_steps_per_period)
n_steps = int(round(n_periods * T / dt))
if dt == 0. or dt < 1E-13:
raise ValueError("Timestep is zero or very small!")
return dt, n_steps
|
python
|
def estimate_dt_n_steps(w0, hamiltonian, n_periods, n_steps_per_period,
dE_threshold=1E-9, func=np.nanmax,
**integrate_kwargs):
"""
Estimate the timestep and number of steps to integrate an orbit for
given its initial conditions and a potential object.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
potential : :class:`~gala.potential.PotentialBase`
The potential to integrate the orbit in.
n_periods : int
Number of (max) orbital periods to integrate for.
n_steps_per_period : int
Number of steps to take per (max) orbital period.
dE_threshold : numeric (optional)
Maximum fractional energy difference -- used to determine initial
timestep. Set to ``None`` to ignore this.
func : callable (optional)
Determines which period to use. By default, this takes the maximum
period using :func:`~numpy.nanmax`. Other options could be
:func:`~numpy.nanmin`, :func:`~numpy.nanmean`, :func:`~numpy.nanmedian`.
Returns
-------
dt : float
The timestep.
n_steps : int
The number of timesteps to integrate for.
"""
if not isinstance(w0, PhaseSpacePosition):
w0 = np.asarray(w0)
w0 = PhaseSpacePosition.from_w(w0, units=hamiltonian.units)
# integrate orbit
dt = _autodetermine_initial_dt(w0, hamiltonian, dE_threshold=dE_threshold,
**integrate_kwargs)
n_steps = int(round(10000 / dt))
orbit = hamiltonian.integrate_orbit(w0, dt=dt, n_steps=n_steps,
**integrate_kwargs)
# if loop, align circulation with Z and take R period
circ = orbit.circulation()
if np.any(circ):
orbit = orbit.align_circulation_with_z(circulation=circ)
cyl = orbit.represent_as(coord.CylindricalRepresentation)
# convert to cylindrical coordinates
R = cyl.rho.value
phi = cyl.phi.value
z = cyl.z.value
T = np.array([peak_to_peak_period(orbit.t, f).value
for f in [R, phi, z]])*orbit.t.unit
else:
T = np.array([peak_to_peak_period(orbit.t, f).value
for f in orbit.pos])*orbit.t.unit
# timestep from number of steps per period
T = func(T)
if np.isnan(T):
raise RuntimeError("Failed to find period.")
T = T.decompose(hamiltonian.units).value
dt = T / float(n_steps_per_period)
n_steps = int(round(n_periods * T / dt))
if dt == 0. or dt < 1E-13:
raise ValueError("Timestep is zero or very small!")
return dt, n_steps
|
[
"def",
"estimate_dt_n_steps",
"(",
"w0",
",",
"hamiltonian",
",",
"n_periods",
",",
"n_steps_per_period",
",",
"dE_threshold",
"=",
"1E-9",
",",
"func",
"=",
"np",
".",
"nanmax",
",",
"*",
"*",
"integrate_kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"w0",
",",
"PhaseSpacePosition",
")",
":",
"w0",
"=",
"np",
".",
"asarray",
"(",
"w0",
")",
"w0",
"=",
"PhaseSpacePosition",
".",
"from_w",
"(",
"w0",
",",
"units",
"=",
"hamiltonian",
".",
"units",
")",
"# integrate orbit",
"dt",
"=",
"_autodetermine_initial_dt",
"(",
"w0",
",",
"hamiltonian",
",",
"dE_threshold",
"=",
"dE_threshold",
",",
"*",
"*",
"integrate_kwargs",
")",
"n_steps",
"=",
"int",
"(",
"round",
"(",
"10000",
"/",
"dt",
")",
")",
"orbit",
"=",
"hamiltonian",
".",
"integrate_orbit",
"(",
"w0",
",",
"dt",
"=",
"dt",
",",
"n_steps",
"=",
"n_steps",
",",
"*",
"*",
"integrate_kwargs",
")",
"# if loop, align circulation with Z and take R period",
"circ",
"=",
"orbit",
".",
"circulation",
"(",
")",
"if",
"np",
".",
"any",
"(",
"circ",
")",
":",
"orbit",
"=",
"orbit",
".",
"align_circulation_with_z",
"(",
"circulation",
"=",
"circ",
")",
"cyl",
"=",
"orbit",
".",
"represent_as",
"(",
"coord",
".",
"CylindricalRepresentation",
")",
"# convert to cylindrical coordinates",
"R",
"=",
"cyl",
".",
"rho",
".",
"value",
"phi",
"=",
"cyl",
".",
"phi",
".",
"value",
"z",
"=",
"cyl",
".",
"z",
".",
"value",
"T",
"=",
"np",
".",
"array",
"(",
"[",
"peak_to_peak_period",
"(",
"orbit",
".",
"t",
",",
"f",
")",
".",
"value",
"for",
"f",
"in",
"[",
"R",
",",
"phi",
",",
"z",
"]",
"]",
")",
"*",
"orbit",
".",
"t",
".",
"unit",
"else",
":",
"T",
"=",
"np",
".",
"array",
"(",
"[",
"peak_to_peak_period",
"(",
"orbit",
".",
"t",
",",
"f",
")",
".",
"value",
"for",
"f",
"in",
"orbit",
".",
"pos",
"]",
")",
"*",
"orbit",
".",
"t",
".",
"unit",
"# timestep from number of steps per period",
"T",
"=",
"func",
"(",
"T",
")",
"if",
"np",
".",
"isnan",
"(",
"T",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Failed to find period.\"",
")",
"T",
"=",
"T",
".",
"decompose",
"(",
"hamiltonian",
".",
"units",
")",
".",
"value",
"dt",
"=",
"T",
"/",
"float",
"(",
"n_steps_per_period",
")",
"n_steps",
"=",
"int",
"(",
"round",
"(",
"n_periods",
"*",
"T",
"/",
"dt",
")",
")",
"if",
"dt",
"==",
"0.",
"or",
"dt",
"<",
"1E-13",
":",
"raise",
"ValueError",
"(",
"\"Timestep is zero or very small!\"",
")",
"return",
"dt",
",",
"n_steps"
] |
Estimate the timestep and number of steps to integrate an orbit for
given its initial conditions and a potential object.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`, array_like
Initial conditions.
potential : :class:`~gala.potential.PotentialBase`
The potential to integrate the orbit in.
n_periods : int
Number of (max) orbital periods to integrate for.
n_steps_per_period : int
Number of steps to take per (max) orbital period.
dE_threshold : numeric (optional)
Maximum fractional energy difference -- used to determine initial
timestep. Set to ``None`` to ignore this.
func : callable (optional)
Determines which period to use. By default, this takes the maximum
period using :func:`~numpy.nanmax`. Other options could be
:func:`~numpy.nanmin`, :func:`~numpy.nanmean`, :func:`~numpy.nanmedian`.
Returns
-------
dt : float
The timestep.
n_steps : int
The number of timesteps to integrate for.
|
[
"Estimate",
"the",
"timestep",
"and",
"number",
"of",
"steps",
"to",
"integrate",
"an",
"orbit",
"for",
"given",
"its",
"initial",
"conditions",
"and",
"a",
"potential",
"object",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/util.py#L94-L169
|
train
|
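A sketch of choosing integration parameters with estimate_dt_n_steps and then integrating with them. It assumes the function is importable from gala.dynamics.util and that gala's Hamiltonian and PhaseSpacePosition API behaves as at this sha; the potential and initial conditions are illustrative.

import astropy.units as u
import gala.dynamics as gd
import gala.potential as gp
from gala.dynamics.util import estimate_dt_n_steps  # assumed import path

pot = gp.MilkyWayPotential()
H = gp.Hamiltonian(pot)
w0 = gd.PhaseSpacePosition(pos=[10.0, 0.0, 0.5] * u.kpc,
                           vel=[0.0, 190.0, 25.0] * u.km / u.s)

# Pick a timestep and step count for ~16 periods at 512 steps per period.
dt, n_steps = estimate_dt_n_steps(w0, H, n_periods=16, n_steps_per_period=512)
orbit = H.integrate_orbit(w0, dt=dt, n_steps=n_steps)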
adrn/gala
|
gala/coordinates/reflex.py
|
reflex_correct
|
def reflex_correct(coords, galactocentric_frame=None):
"""Correct the input Astropy coordinate object for solar reflex motion.
The input coordinate instance must have distance and radial velocity information. If the radial velocity is not known, fill the
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The Astropy coordinate object with position and velocity information.
galactocentric_frame : `~astropy.coordinates.Galactocentric` (optional)
To change properties of the Galactocentric frame, like the height of the
sun above the midplane, or the velocity of the sun in a Galactocentric
inertial frame, set arguments of the
`~astropy.coordinates.Galactocentric` object and pass in to this
function with your coordinates.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
The coordinates in the same frame as input, but with solar motion
removed.
"""
c = coord.SkyCoord(coords)
# If not specified, use the Astropy default Galactocentric frame
if galactocentric_frame is None:
galactocentric_frame = coord.Galactocentric()
v_sun = galactocentric_frame.galcen_v_sun
observed = c.transform_to(galactocentric_frame)
rep = observed.cartesian.without_differentials()
rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
fr = galactocentric_frame.realize_frame(rep).transform_to(c.frame)
return coord.SkyCoord(fr)
|
python
|
def reflex_correct(coords, galactocentric_frame=None):
"""Correct the input Astropy coordinate object for solar reflex motion.
The input coordinate instance must have distance and radial velocity information. If the radial velocity is not known, fill the
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The Astropy coordinate object with position and velocity information.
galactocentric_frame : `~astropy.coordinates.Galactocentric` (optional)
To change properties of the Galactocentric frame, like the height of the
sun above the midplane, or the velocity of the sun in a Galactocentric
inertial frame, set arguments of the
`~astropy.coordinates.Galactocentric` object and pass in to this
function with your coordinates.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
The coordinates in the same frame as input, but with solar motion
removed.
"""
c = coord.SkyCoord(coords)
# If not specified, use the Astropy default Galactocentric frame
if galactocentric_frame is None:
galactocentric_frame = coord.Galactocentric()
v_sun = galactocentric_frame.galcen_v_sun
observed = c.transform_to(galactocentric_frame)
rep = observed.cartesian.without_differentials()
rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
fr = galactocentric_frame.realize_frame(rep).transform_to(c.frame)
return coord.SkyCoord(fr)
|
[
"def",
"reflex_correct",
"(",
"coords",
",",
"galactocentric_frame",
"=",
"None",
")",
":",
"c",
"=",
"coord",
".",
"SkyCoord",
"(",
"coords",
")",
"# If not specified, use the Astropy default Galactocentric frame",
"if",
"galactocentric_frame",
"is",
"None",
":",
"galactocentric_frame",
"=",
"coord",
".",
"Galactocentric",
"(",
")",
"v_sun",
"=",
"galactocentric_frame",
".",
"galcen_v_sun",
"observed",
"=",
"c",
".",
"transform_to",
"(",
"galactocentric_frame",
")",
"rep",
"=",
"observed",
".",
"cartesian",
".",
"without_differentials",
"(",
")",
"rep",
"=",
"rep",
".",
"with_differentials",
"(",
"observed",
".",
"cartesian",
".",
"differentials",
"[",
"'s'",
"]",
"+",
"v_sun",
")",
"fr",
"=",
"galactocentric_frame",
".",
"realize_frame",
"(",
"rep",
")",
".",
"transform_to",
"(",
"c",
".",
"frame",
")",
"return",
"coord",
".",
"SkyCoord",
"(",
"fr",
")"
] |
Correct the input Astropy coordinate object for solar reflex motion.
The input coordinate instance must have distance and radial velocity information. If the radial velocity is not known, fill the
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The Astropy coordinate object with position and velocity information.
galactocentric_frame : `~astropy.coordinates.Galactocentric` (optional)
To change properties of the Galactocentric frame, like the height of the
sun above the midplane, or the velocity of the sun in a Galactocentric
inertial frame, set arguments of the
`~astropy.coordinates.Galactocentric` object and pass in to this
function with your coordinates.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
The coordinates in the same frame as input, but with solar motion
removed.
|
[
"Correct",
"the",
"input",
"Astropy",
"coordinate",
"object",
"for",
"solar",
"reflex",
"motion",
"."
] |
ea95575a0df1581bb4b0986aebd6eea8438ab7eb
|
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/reflex.py#L5-L40
|
train
|
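A sketch of reflex_correct applied to a single observed star. It assumes the function is importable from gala.coordinates (matching this record's path), that the input SkyCoord carries the full 6D information the docstring requires, and that astropy's default Galactocentric parameters are acceptable; the coordinate values below are made up.

import astropy.units as u
import astropy.coordinates as coord
from gala.coordinates import reflex_correct  # assumed import path

c = coord.SkyCoord(ra=150.3 * u.deg, dec=-17.0 * u.deg, distance=1.2 * u.kpc,
                   pm_ra_cosdec=-4.2 * u.mas / u.yr, pm_dec=1.1 * u.mas / u.yr,
                   radial_velocity=65.0 * u.km / u.s)

c_corr = reflex_correct(c)  # use the default Galactocentric frame
c_custom = reflex_correct(c, coord.Galactocentric(z_sun=20.0 * u.pc))  # tweak frame parameters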