language (stringclasses, 6 values) | original_string (stringlengths, 25–887k) | text (stringlengths, 25–887k) |
---|---|---|
Python | def cf(self):
"""Return a dictionary with the NetCDF CF parameters for the
projection.
Returns:
:obj:`dict`: A dictionary with the NetCDF CF parameter names and
projection parameter values.
"""
return self._cf_params() | def cf(self):
"""Return a dictionary with the NetCDF CF parameters for the
projection.
Returns:
:obj:`dict`: A dictionary with the NetCDF CF parameter names and
projection parameter values.
"""
return self._cf_params() |
Python | def run(self, args=''):
"""Run the executable from a shell with the given arguments.
Input:
args Optional; a string of switches appended to the command-line
call to the executable, e.g. `-sw1 -sw2`
Self variables:
stdout, stderr Process standard communication channels
returncode Process exit status
Process output variables are overwritten if run multiple times.
Additionally returns stdout.
"""
# Clear output variables from any previous execution
self.stdout, self.stderr, self.returncode = None, None, None
# Run executable from shell with arguments as string
process = subprocess.Popen(
'{} {}'.format(self.command, args), shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Store stdout and stderr
self.stdout, self.stderr = process.communicate()
self.returncode = process.returncode
# Return stdout
return self.stdout | def run(self, args=''):
"""Run the executable from a shell with the given arguments.
Input:
args Optional; a string of switches appended to the command-line
call to the executable, e.g. `-sw1 -sw2`
Self variables:
stdout, stderr Process standard communication channels
returncode Process exit status
Process output variables are overwritten if run multiple times.
Additionally returns stdout.
"""
# Clear output variables from any previous execution
self.stdout, self.stderr, self.returncode = None, None, None
# Run executable from shell with arguments as string
process = subprocess.Popen(
'{} {}'.format(self.command, args), shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Store stdout and stderr
self.stdout, self.stderr = process.communicate()
self.returncode = process.returncode
# Return stdout
return self.stdout |
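A minimal, self-contained sketch of how this `run` method is typically used, with a hypothetical `Command` wrapper class (the surrounding class is not shown in the row). Note that `shell=True` means `args` is interpreted by the shell, so untrusted input should never be passed in:

```python
import subprocess

class Command:
    """Hypothetical wrapper class that the run() method above belongs to."""
    def __init__(self, command):
        self.command = command
        self.stdout = self.stderr = self.returncode = None

    def run(self, args=''):
        process = subprocess.Popen(
            '{} {}'.format(self.command, args), shell=True,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        self.stdout, self.stderr = process.communicate()
        self.returncode = process.returncode
        return self.stdout

cmd = Command("echo")
print(cmd.run("hello"))   # b'hello\n'
print(cmd.returncode)     # 0
```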
Python | def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"]) - 1)
name = result["people"][index]["name"]
return "{} is in space".format(name) | def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"]) - 1)
name = result["people"][index]["name"]
return "{} is in space".format(name) |
Python | def installSoftware(software):
"""
Install arbitrary software based on a software object
"""
packagelist = ""
for package in software.packages:
packagelist += package + " "
if "{}" in software.installer:
return [software.installer.format(packagelist)]
return [software.installer + " " + packagelist] | def installSoftware(software):
"""
Install arbitrary software based on a software object
"""
packagelist = ""
for package in software.packages:
packagelist += package + " "
if "{}" in software.installer:
return [software.installer.format(packagelist)]
return [software.installer + " " + packagelist] |
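To illustrate the two return paths, a runnable sketch with a hypothetical `Software` namedtuple standing in for the real software model class (which lives elsewhere in the repo); note the trailing space left by the string concatenation:

```python
from collections import namedtuple

# Hypothetical stand-in for the software model class
Software = namedtuple("Software", ["installer", "packages"])

def installSoftware(software):
    packagelist = ""
    for package in software.packages:
        packagelist += package + " "
    if "{}" in software.installer:
        return [software.installer.format(packagelist)]
    return [software.installer + " " + packagelist]

print(installSoftware(Software("apt-get install -y", ["git", "vim"])))
# ['apt-get install -y git vim ']
print(installSoftware(Software("pacman -S {} --noconfirm", ["git", "vim"])))
# ['pacman -S git vim  --noconfirm']
```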
Python | def start(self, command="/bin/bash", herefile="EOF"):
"""
Generate the chrooted environment commands. A chrooted environment should be given a list of commands to execute inside of it.
A herefile/string should be supplied to specify the delimiter, as the underlying structure uses heredocs.
The herestring should be a string of characters that is not present inside the command
"""
if self.user == "root":
return self.chrootAsRoot(command, herefile)
return self.chrootAsUser(command, herefile, herefile+"2") | def start(self, command="/bin/bash", herefile="EOF"):
"""
Generate the chrooted environment commands. A chrooted environment should be given a list of commands to execute inside of it.
A herefile/string should be supplied to specify the delimiter, as the underlying structure uses heredocs.
The herestring should be a string of characters that is not present inside the command
"""
if self.user == "root":
return self.chrootAsRoot(command, herefile)
return self.chrootAsUser(command, herefile, herefile+"2") |
Python | def install(self):
"""
Install a git repository to a specific location. It also installs git in case it is not present.
Returns a list of shell commands: one to install git and one to clone the repository.
"""
return [self.installGit(), "git clone {} {}".format(self.url, self.destination)] | def install(self):
"""
Install a git repository to a specific location. It also installs git in case it is not present.
Returns a list of shell commands: one to install git and one to clone the repository.
"""
return [self.installGit(), "git clone {} {}".format(self.url, self.destination)] |
Python | def installGit(self):
"""
Install git if it doesn't exist yet
"""
software = sw.software(packages=[config.GITPACKAGE])
return builder.installSoftware(software) | def installGit(self):
"""
Install git if it doesn't exist yet
"""
software = sw.software(packages=[config.GITPACKAGE])
return builder.installSoftware(software) |
Python | def populateMissingNetwork(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to network configuration options
"""
if not exists(content, "IP"):
content["IP"] = config.IP
if not exists(content, "WIFI_CONNECT_COMMAND"):
content["WIFI_CONNECT_COMMAND"] = config.WIFI_CONNECT_COMMAND
if not exists(content, "WIFI_CONNECT_COMMAND_WITH_PASSWORD"):
content["WIFI_CONNECT_COMMAND_WITH_PASSWORD"] = config.WIFI_CONNECT_COMMAND_WITH_PASSWORD
return content | def populateMissingNetwork(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to network configuration options
"""
if not exists(content, "IP"):
content["IP"] = config.IP
if not exists(content, "WIFI_CONNECT_COMMAND"):
content["WIFI_CONNECT_COMMAND"] = config.WIFI_CONNECT_COMMAND
if not exists(content, "WIFI_CONNECT_COMMAND_WITH_PASSWORD"):
content["WIFI_CONNECT_COMMAND_WITH_PASSWORD"] = config.WIFI_CONNECT_COMMAND_WITH_PASSWORD
return content |
Python | def populateMissingUser(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to user configuration options
"""
if not exists(content, "DEFAULT_SHELL"):
content["DEFAULT_SHELL"] = config.DEFAULT_SHELL
if not exists(content, "USERADD"):
content["USERADD"] = config.USERADD
if not exists(content, "MOUNTPOINT"):
content["MOUNTPOINT"] = config.MOUNTPOINT
return content | def populateMissingUser(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to user configuration options
"""
if not exists(content, "DEFAULT_SHELL"):
content["DEFAULT_SHELL"] = config.DEFAULT_SHELL
if not exists(content, "USERADD"):
content["USERADD"] = config.USERADD
if not exists(content, "MOUNTPOINT"):
content["MOUNTPOINT"] = config.MOUNTPOINT
return content |
Python | def populateMissingBootloader(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to bootloader configuration options
"""
if not exists(content, "BOOTLOADER_EFI"):
content["BOOTLOADER_EFI"] = config.BOOTLOADER_EFI
if not exists(content, "BOOTLOADER_DOS"):
content["BOOTLOADER_DOS"] = config.BOOTLOADER_DOS
if not exists(content, "BOOTLOADER_CONFIG"):
content["BOOTLOADER_CONFIG"] = config.BOOTLOADER_CONFIG
return content | def populateMissingBootloader(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to bootloader configuration options
"""
if not exists(content, "BOOTLOADER_EFI"):
content["BOOTLOADER_EFI"] = config.BOOTLOADER_EFI
if not exists(content, "BOOTLOADER_DOS"):
content["BOOTLOADER_DOS"] = config.BOOTLOADER_DOS
if not exists(content, "BOOTLOADER_CONFIG"):
content["BOOTLOADER_CONFIG"] = config.BOOTLOADER_CONFIG
return content |
Python | def populateMissingSystem(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to system configuration options
"""
if not exists(content, "LOCAL"):
content["LOCAL"] = config.LOCALE
if not exists(content, "KEYMAP"):
content["KEYMAP"] = config.KEYMAP
if not exists(content, "HOSTNAME"):
content["HOSTNAME"] = config.HOSTNAME
if not exists(content, "ROOT_PWD"):
content["ROOT_PWD"] = config.ROOT_PWD
return content | def populateMissingSystem(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to system configuration options
"""
if not exists(content, "LOCAL"):
content["LOCAL"] = config.LOCALE
if not exists(content, "KEYMAP"):
content["KEYMAP"] = config.KEYMAP
if not exists(content, "HOSTNAME"):
content["HOSTNAME"] = config.HOSTNAME
if not exists(content, "ROOT_PWD"):
content["ROOT_PWD"] = config.ROOT_PWD
return content |
Python | def populateMissingLUKS(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to LUKS encryption configuration options
"""
if not exists(content, "LUKS"):
content["LUKS"] = config.LUKS
if not exists(content, "LUKS_OPEN"):
content["LUKS_OPEN"] = config.LUKS_OPEN
if not exists(content, "LUKS_NAME"):
content["LUKS_NAME"] = config.LUKS_NAME
if not exists(content, "LUKS_DEVICE"):
content["LUKS_DEVICE"] = config.LUKS_DEVICE
return content | def populateMissingLUKS(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to LUKS encryption configuration options
"""
if not exists(content, "LUKS"):
content["LUKS"] = config.LUKS
if not exists(content, "LUKS_OPEN"):
content["LUKS_OPEN"] = config.LUKS_OPEN
if not exists(content, "LUKS_NAME"):
content["LUKS_NAME"] = config.LUKS_NAME
if not exists(content, "LUKS_DEVICE"):
content["LUKS_DEVICE"] = config.LUKS_DEVICE
return content |
Python | def populateMissingMisc(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to miscellaneous configuration options
"""
if not exists(content, "INSTALLCOMMAND"):
content["INSTALLCOMMAND"] = config.INSTALLCOMMAND
if not exists(content, "CHROOT"):
content["CHROOT"] = config.CHROOT
if not exists(content, "FSTAB"):
content["FSTAB"] = config.FSTAB
if not exists(content, "GROUPS"):
content["GROUPS"] = config.GROUPS
if not exists(content, "HERESTRING"):
content["HERESTRING"] = config.HERESTRING
if not exists(content, "BOOTSTRAP"):
content["BOOTSTRAP"] = config.BOOTSTRAP
if not exists(content, "BOOTSTRAP_PACKAGES"):
content["BOOTSTRAP_PACKAGES"] = config.BOOTSTRAP_PACKAGES
if not exists(content, "KERNEL"):
content["KERNEL"] = config.KERNEL
return content | def populateMissingMisc(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
This function should not be called externally. It fills all data related to miscellaneous configuration options
"""
if not exists(content, "INSTALLCOMMAND"):
content["INSTALLCOMMAND"] = config.INSTALLCOMMAND
if not exists(content, "CHROOT"):
content["CHROOT"] = config.CHROOT
if not exists(content, "FSTAB"):
content["FSTAB"] = config.FSTAB
if not exists(content, "GROUPS"):
content["GROUPS"] = config.GROUPS
if not exists(content, "HERESTRING"):
content["HERESTRING"] = config.HERESTRING
if not exists(content, "BOOTSTRAP"):
content["BOOTSTRAP"] = config.BOOTSTRAP
if not exists(content, "BOOTSTRAP_PACKAGES"):
content["BOOTSTRAP_PACKAGES"] = config.BOOTSTRAP_PACKAGES
if not exists(content, "KERNEL"):
content["KERNEL"] = config.KERNEL
return content |
Python | def populateMissingPieces(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
"""
content = populateMissingNetwork(content)
content = populateMissingUser(content)
content = populateMissingBootloader(content)
content = populateMissingSystem(content)
content = populateMissingLUKS(content)
content = populateMissingMisc(content)
return content | def populateMissingPieces(content):
"""
See if a piece of the dictionary is missing. If it is then we set it to the hardcoded values defined in config.py
"""
content = populateMissingNetwork(content)
content = populateMissingUser(content)
content = populateMissingBootloader(content)
content = populateMissingSystem(content)
content = populateMissingLUKS(content)
content = populateMissingMisc(content)
return content |
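A self-contained sketch of the fill-in-defaults pattern these functions share, using hypothetical config values (the real defaults live in config.py):

```python
# A minimal sketch of the fill-in-defaults pattern used above,
# with a hypothetical config stand-in (real values live in config.py).
class config:
    LOCALE = "en_US.UTF-8"
    KEYMAP = "us"
    HOSTNAME = "archbox"
    ROOT_PWD = "toor"

def exists(dictionary, key):
    return key in dictionary

def populateMissingSystem(content):
    if not exists(content, "LOCAL"):
        content["LOCAL"] = config.LOCALE
    if not exists(content, "KEYMAP"):
        content["KEYMAP"] = config.KEYMAP
    if not exists(content, "HOSTNAME"):
        content["HOSTNAME"] = config.HOSTNAME
    if not exists(content, "ROOT_PWD"):
        content["ROOT_PWD"] = config.ROOT_PWD
    return content

content = populateMissingSystem({"HOSTNAME": "mybox"})
print(content["HOSTNAME"])  # 'mybox' -- user value is preserved
print(content["LOCAL"])     # 'en_US.UTF-8' -- filled from config
```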
Python | def exists(dictionary, key):
"""
Check if a key exists in a dictionary
"""
return key in dictionary | def exists(dictionary, key):
"""
Check if a key exists in a dictionary
"""
return key in dictionary |
Python | def convertYamlToCommands(executor, config=None):
"""
convert a parser.executor object to commands
"""
commands = []
for step in executor.steps:
# make partitiontable
if type(step) == type(execution.partitiontable()):
commands = concat(commands, PartitionTableGen(step, config))
elif type(step) == type(execution.format()):
commands = concat(commands, formatGen(step, config))
elif type(step) == type(execution.mount()):
commands = concat(commands, mountGen(step, config))
elif type(step) == type(execution.bootstrap()):
commands = concat(commands, bootstrapGen(step, config))
elif type(step) == type(execution.fstab()):
commands = concat(commands, fstabGen(step, config))
elif type(step) == type(execution.scriptstep()):
commands = concat(commands, scriptGen(step, config))
elif type(step) == type(execution.chroot(config)):
commands = concat(commands, chrootGen(step, config))
elif type(step) == type(execution.systemsetup()):
commands = concat(commands, systemGen(step, config))
elif type(step) == type(execution.createUser()):
commands = concat(commands, createUser(step, config))
elif type(step) == type(execution.bootloaderstep()):
commands = concat(commands, bootloaderGen(step, config))
elif type(step) == type(execution.packages()):
commands = concat(commands, packageGen(step, config))
elif type(step) == type(execution.network()):
commands = concat(commands, networkGen(step, config))
else:
print(step)
return commands | def convertYamlToCommands(executor, config=None):
"""
convert a parser.executor object to commands
"""
commands = []
for step in executor.steps:
# make partitiontable
if type(step) == type(execution.partitiontable()):
commands = concat(commands, PartitionTableGen(step, config))
elif type(step) == type(execution.format()):
commands = concat(commands, formatGen(step, config))
elif type(step) == type(execution.mount()):
commands = concat(commands, mountGen(step, config))
elif type(step) == type(execution.bootstrap()):
commands = concat(commands, bootstrapGen(step, config))
elif type(step) == type(execution.fstab()):
commands = concat(commands, fstabGen(step, config))
elif type(step) == type(execution.scriptstep()):
commands = concat(commands, scriptGen(step, config))
elif type(step) == type(execution.chroot(config)):
commands = concat(commands, chrootGen(step, config))
elif type(step) == type(execution.systemsetup()):
commands = concat(commands, systemGen(step, config))
elif type(step) == type(execution.createUser()):
commands = concat(commands, createUser(step, config))
elif type(step) == type(execution.bootloaderstep()):
commands = concat(commands, bootloaderGen(step, config))
elif type(step) == type(execution.packages()):
commands = concat(commands, packageGen(step, config))
elif type(step) == type(execution.network()):
commands = concat(commands, networkGen(step, config))
else:
print(step)
return commands |
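The if/elif type chain above can also be written as a table-driven dispatch. A sketch with stand-in step classes and generator functions (the real `execution.*` classes and `*Gen` functions are assumed to keep the `(step, config)` signature):

```python
# Self-contained illustration of table-driven dispatch as an
# alternative to the if/elif type chain above. The step classes
# here are stand-ins for the real execution.* classes.
class PartitionTableStep: pass
class MountStep: pass

def partition_gen(step, config): return ["# build partition table"]
def mount_gen(step, config): return ["# mount partitions"]

DISPATCH = {
    PartitionTableStep: partition_gen,
    MountStep: mount_gen,
}

def convert(steps, config=None):
    commands = []
    for step in steps:
        gen = DISPATCH.get(type(step))
        if gen is None:
            print(step)          # unknown step: same fallback as above
            continue
        commands += gen(step, config)
    return commands

print(convert([PartitionTableStep(), MountStep()]))
# ['# build partition table', '# mount partitions']
```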
Python | def networkGen(step, config):
"""
If no network exists then we will try and connect to one
"""
return concat(["\n#Establishing a network connection"], nw.Connector().getShellCommand(step.model.ssid, step.model.password, config["WIFI_CONNECT_COMMAND_WITH_PASSWORD"], config)) | def networkGen(step, config):
"""
If no network exists then we will try and connect to one
"""
return concat(["\n#Establishing a network connection"], nw.Connector().getShellCommand(step.model.ssid, step.model.password, config["WIFI_CONNECT_COMMAND_WITH_PASSWORD"], config)) |
Python | def chrootGen(step, config):
"""
Build chroot command from a parser.executor.chroot object
"""
commands = convertYamlToCommands(step, config)
return concat(["\n# Executing chroot function"], chroot.chroot(chrootfunc=config["CHROOT"], user=step.user, mountpoint=step.mountpoint).start(command=commands, herefile=config["HERESTRING"])) | def chrootGen(step, config):
"""
Build chroot command from a parser.executor.chroot object
"""
commands = convertYamlToCommands(step, config)
return concat(["\n# Executing chroot function"], chroot.chroot(chrootfunc=config["CHROOT"], user=step.user, mountpoint=step.mountpoint).start(command=commands, herefile=config["HERESTRING"])) |
Python | def scriptGen(step, config):
"""
Generate a script command from a parser.executor.script
"""
if step.model.command is not None:
return concat(["\n# Executing custom script"], script.script(shell="", payload=step.model.command).exec())
with open(step.model.file, 'r') as stream:
return concat(["\n# Executing custom script"], script.script(shell="", payload=stream.read()).exec()) | def scriptGen(step, config):
"""
Generate a script command from a parser.executor.script
"""
if step.model.command is not None:
return concat(["\n# Executing custom script"], script.script(shell="", payload=step.model.command).exec())
with open(step.model.file, 'r') as stream:
return concat(["\n# Executing custom script"], script.script(shell="", payload=stream.read()).exec()) |
Python | def bootstrapGen(step, config):
"""
Bootstrap the system on a drive
"""
return concat(["\n#bootstrapping system"], swb.installSoftware(sw.software(config["BOOTSTRAP"], packages=config["BOOTSTRAP_PACKAGES"]))) | def bootstrapGen(step, config):
"""
Bootstrap the system on a drive
"""
return concat(["\n#bootstrapping system"], swb.installSoftware(sw.software(config["BOOTSTRAP"], packages=config["BOOTSTRAP_PACKAGES"]))) |
Python | def mountGen(mounter, config):
"""
Generate mount commands from a parser.executor.mount object
"""
commands = ["\n#Mounting partitions"]
if type(mounter.model) == type(parsemodel.disk()):
partitions = genPartitions(
mounter.model.partitions, mounter.model.device, config)
else:
partitions = genPartitions(
[mounter.model], mounter.model.device, config)
commands = concat(commands, mb.mountAll(partitions, config["MOUNTPOINT"]))
return commands | def mountGen(mounter, config):
"""
Generate mount commands from a parser.executor.mount object
"""
commands = ["\n#Mounting partitions"]
if type(mounter.model) == type(parsemodel.disk()):
partitions = genPartitions(
mounter.model.partitions, mounter.model.device, config)
else:
partitions = genPartitions(
[mounter.model], mounter.model.device, config)
commands = concat(commands, mb.mountAll(partitions, config["MOUNTPOINT"]))
return commands |
Python | def formatGen(formater, config):
"""
Generate format commands from a parser.executor.format object
"""
commands = ["\n#Formating partitions"]
if type(formater.model) == type(parsemodel.disk()):
partitions = genPartitions(
formater.model.partitions, formater.model.device, config)
else:
partitions = genPartitions(
[formater.model], formater.model.device, config)
for part in partitions:
commands = concat(commands, pb.format(part, config))
return commands | def formatGen(formater, config):
"""
Generate format commands from a parser.executor.format object
"""
commands = ["\n#Formating partitions"]
if type(formater.model) == type(parsemodel.disk()):
partitions = genPartitions(
formater.model.partitions, formater.model.device, config)
else:
partitions = genPartitions(
[formater.model], formater.model.device, config)
for part in partitions:
commands = concat(commands, pb.format(part, config))
return commands |
Python | def PartitionTableGen(ptable, config):
"""
Convert a partitiontable gen step to a list of commands
"""
model = ptable.model
table = tablemodel.disk(model.device, model.size,
genPartitions(model.partitions, model.device, config), model.gpt)
if model.bGenTable:
return concat(["\n#Building partition table"], tablebuilder.buildPartitionTable(table))
return concat(["\n#Building partition table"], tablebuilder.buildPartitionTableEntries(table)) | def PartitionTableGen(ptable, config):
"""
Convert a partitiontable gen step to a list of commands
"""
model = ptable.model
table = tablemodel.disk(model.device, model.size,
genPartitions(model.partitions, model.device, config), model.gpt)
if model.bGenTable:
return concat(["\n#Building partition table"], tablebuilder.buildPartitionTable(table))
return concat(["\n#Building partition table"], tablebuilder.buildPartitionTableEntries(table)) |
Python | def genPartitions(parserPartitions, diskdevice, config):
"""
generate a list of model partition from a parser partition model
"""
partitions = []
if "nvme" in diskdevice:
diskdevice += "p"
# TODO: Generate logical volumes here as well
for i, part in enumerate(parserPartitions):
device = diskdevice+str(i+1)
offset = i+1
if part.offset is not None:
device = diskdevice + str(part.offset)
offset = part.offset
partitions.append(partition.partition(device,
part.name, part.mount, part.filesystem, part.start, part.end, part.bIsEncrypted, part.volumes, part.password, part.resize, part.size, offset))
return partitions | def genPartitions(parserPartitions, diskdevice, config):
"""
generate a list of model partition from a parser partition model
"""
partitions = []
if "nvme" in diskdevice:
diskdevice += "p"
# TODO: Generate logical volumes here as well
for i, part in enumerate(parserPartitions):
device = diskdevice+str(i+1)
offset = i+1
if part.offset is not None:
device = diskdevice + str(part.offset)
offset = part.offset
partitions.append(partition.partition(device,
part.name, part.mount, part.filesystem, part.start, part.end, part.bIsEncrypted, part.volumes, part.password, part.resize, part.size, offset))
return partitions |
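A reduced sketch of the device-naming rule handled by the `"nvme"` check above: NVMe devices separate the disk name and partition number with a `p`:

```python
# Device-name derivation: NVMe disks separate disk and partition
# numbers with a "p" (sda1 vs nvme0n1p1), which is what the
# `if "nvme" in diskdevice` branch above handles.
def partition_device(diskdevice, number):
    if "nvme" in diskdevice:
        diskdevice += "p"
    return diskdevice + str(number)

print(partition_device("/dev/sda", 1))      # /dev/sda1
print(partition_device("/dev/nvme0n1", 1))  # /dev/nvme0n1p1
```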
Python | def BuildSoftwareFromFile(filename, installer=config.INSTALLCOMMAND):
"""
Build a software object from a file containing packages
@filename = a file containing packages (every line is a package)
@installer = the base install command to install software
"""
sw = software(installer=installer)
with open(filename) as f:
lines = f.read().splitlines()
sw.packages = lines
return sw | def BuildSoftwareFromFile(filename, installer=config.INSTALLCOMMAND):
"""
Build a software object from a file containing packages
@filename = a file containing packages (every line is a package)
@installer = the base install command to install software
"""
sw = software(installer=installer)
with open(filename) as f:
lines = f.read().splitlines()
sw.packages = lines
return sw |
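A usage sketch; the installer default here is a hypothetical stand-in for config.INSTALLCOMMAND, and the minimal `software` class stands in for the real one:

```python
# Usage sketch, assuming a packages file and a minimal software class
# (the real class and config.INSTALLCOMMAND live elsewhere in the repo).
class software:
    def __init__(self, installer="pacman -S --noconfirm"):  # assumed default
        self.installer = installer
        self.packages = []

def BuildSoftwareFromFile(filename, installer="pacman -S --noconfirm"):
    sw = software(installer=installer)
    with open(filename) as f:
        sw.packages = f.read().splitlines()
    return sw

with open("packages.txt", "w") as f:
    f.write("git\nvim\nhtop\n")

sw = BuildSoftwareFromFile("packages.txt")
print(sw.packages)  # ['git', 'vim', 'htop']
```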
Python | def generate(raw, config):
"""
Convert a raw yaml conversion to a class-based representation
"""
representation = yamlfile.file()
representation.model = model.generateModel(raw["models"], config)
representation.execution = execution.generateExecution(
raw["execution"], config)
return representation | def generate(raw, config):
"""
Convert a raw yaml conversion to a class-based representation
"""
representation = yamlfile.file()
representation.model = model.generateModel(raw["models"], config)
representation.execution = execution.generateExecution(
raw["execution"], config)
return representation |
Python | def modelLinker(file):
"""
Convert a raw yamlfile.file object into a linked model-to-executor structure.
It links generated models to executors, so that each executor knows which model to work with
"""
for executor in file.execution.steps:
executor.setModel(file.model)
if type(executor) == type(execution.chroot({"MOUNTPOINT": ""})):
for step in executor.steps:
step.setModel(file.model)
return file | def modelLinker(file):
"""
Convert a raw yamlfile.file object into a linked model-to-executor structure.
It links generated models to executors, so that each executor knows which model to work with
"""
for executor in file.execution.steps:
executor.setModel(file.model)
if type(executor) == type(execution.chroot({"MOUNTPOINT": ""})):
for step in executor.steps:
step.setModel(file.model)
return file |
Python | def buildPartitionTable(Disk):
"""
Generate the partition table commands based on a disk object
The disk object must have a root partition
"""
commands = [getPartitionTableType(Disk)]
for part in Disk.partitions:
commands = concat(commands, getPartitionCommands(
Disk, part, part.offset))
return commands | def buildPartitionTable(Disk):
"""
Generate the partition table commands based on a disk object
The disk object must have a root partition
"""
commands = [getPartitionTableType(Disk)]
for part in Disk.partitions:
commands = concat(commands, getPartitionCommands(
Disk, part, part.offset))
return commands |
Python | def buildPartitionTableEntries(Disk):
"""
Generate the partition table commands based on a disk object
It appends the partitions to an existing partition table
"""
commands = []
for part in Disk.partitions:
commands = concat(commands, getPartitionCommands(
Disk, part, part.offset))
return commands | def buildPartitionTableEntries(Disk):
"""
Generate the partition table commands based on a disk object
It appends the partitions to an existing partition table
"""
commands = []
for part in Disk.partitions:
commands = concat(commands, getPartitionCommands(
Disk, part, part.offset))
return commands |
Python | def buildPrimaryPartition(disk, Partition, index=0, name="root"):
"""
Alter the partition table to include a primary typed partition on a certain disk
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
commands = []
commands.append(getBaseCommand(
disk) + "mkpart primary {} {}".format(Partition.start, Partition.end))
commands.append(getBaseCommand(disk) + "name {} {}".format(index, name))
return commands | def buildPrimaryPartition(disk, Partition, index=0, name="root"):
"""
Alter the partition table to include a primary typed partition on a certain disk
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
commands = []
commands.append(getBaseCommand(
disk) + "mkpart primary {} {}".format(Partition.start, Partition.end))
commands.append(getBaseCommand(disk) + "name {} {}".format(index, name))
return commands |
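An illustration of the parted commands this produces, with a hypothetical `getBaseCommand` that prefixes `parted -s <disk> ` (the real helper is defined elsewhere in the repo):

```python
# Hypothetical base-command helper for illustration only.
def getBaseCommand(disk):
    return "parted -s {} ".format(disk)

class Part:
    start, end = "1MiB", "100%"

commands = []
commands.append(getBaseCommand("/dev/sda") +
                "mkpart primary {} {}".format(Part.start, Part.end))
commands.append(getBaseCommand("/dev/sda") + "name {} {}".format(1, "root"))
print(commands)
# ['parted -s /dev/sda mkpart primary 1MiB 100%', 'parted -s /dev/sda name 1 root']
```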
Python | def buildSwapPartition(disk, Partition, index=0, name="swap"):
"""
Alter the partition table to include a swap partition on a certain disk (delegates to buildPrimaryPartition)
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
return buildPrimaryPartition(disk, Partition, index, name) | def buildSwapPartition(disk, Partition, index=0, name="swap"):
"""
Alter the partition table to include a swap partition on a certain disk (delegates to buildPrimaryPartition)
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
return buildPrimaryPartition(disk, Partition, index, name) |
Python | def buildEncryptedPrimaryPartition(disk, Partition, index=0, name="root"):
"""
Alter the partition table to include an encrypted (LVM-flagged) primary partition on a certain disk
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
commands = []
commands.append(getBaseCommand(
disk) + "mkpart primary {} {}".format(Partition.start, Partition.end))
commands.append(getBaseCommand(disk) + "set {} lvm on".format(index))
commands.append(getBaseCommand(disk) + "name {} {}".format(index, name))
return commands | def buildEncryptedPrimaryPartition(disk, Partition, index=0, name="root"):
"""
Alter the partition table to include an encrypted (LVM-flagged) primary partition on a certain disk
@disk is the device to alter its partition table
@Partition is the model of a partition
@index is the index of the partition in the partition table
@name is the name to assign this partition in the table
"""
commands = []
commands.append(getBaseCommand(
disk) + "mkpart primary {} {}".format(Partition.start, Partition.end))
commands.append(getBaseCommand(disk) + "set {} lvm on".format(index))
commands.append(getBaseCommand(disk) + "name {} {}".format(index, name))
return commands |
Python | def buildResizePartition(disk, Partition, index=0, name="root"):
"""
Resize an existing partition to a new size based on its index and the size
"""
commands = []
commands.append(getBaseCommand(
disk) + "resizepart {} {}".format(index, Partition.size))
return commands | def buildResizePartition(disk, Partition, index=0, name="root"):
"""
Resize an existing partition to a new size based on its index and the size
"""
commands = []
commands.append(getBaseCommand(
disk) + "resizepart {} {}".format(index, Partition.size))
return commands |
Python | def format(part, config):
"""
Format a partition based on its filesystem.
Returns a list of commands to run.
If no matching filesystem is found, None is returned
"""
if part.filesystem == EFilesystem.EXT4:
return ["mkfs.ext4 -F {}".format(part.device)]
elif part.filesystem == EFilesystem.BTRFS:
return ["mkfs.btrfs -f {}".format(part.device)]
elif part.filesystem == EFilesystem.FAT32:
return ["mkfs.fat -I -F32 {}".format(part.device)]
elif part.filesystem == EFilesystem.SWAP:
return ["mkswap -f {}".format(part.device)]
elif part.bIsEncrypted or part.filesystem == EFilesystem.LUKS:
return handleEncryptedPartition(part, config)
return None | def format(part, config):
"""
Format a partition based on its filesystem.
Returns a list of commands to run.
If no matching filesystem is found, None is returned
"""
if part.filesystem == EFilesystem.EXT4:
return ["mkfs.ext4 -F {}".format(part.device)]
elif part.filesystem == EFilesystem.BTRFS:
return ["mkfs.btrfs -f {}".format(part.device)]
elif part.filesystem == EFilesystem.FAT32:
return ["mkfs.fat -I -F32 {}".format(part.device)]
elif part.filesystem == EFilesystem.SWAP:
return ["mkswap -f {}".format(part.device)]
elif part.bIsEncrypted or part.filesystem == EFilesystem.LUKS:
return handleEncryptedPartition(part, config)
return None |
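A reduced, runnable sketch of the filesystem dispatch, with a stand-in `EFilesystem` enum and partition object (the real definitions live elsewhere in the repo):

```python
from enum import Enum

# Stand-ins for the real EFilesystem enum and partition model.
class EFilesystem(Enum):
    EXT4 = "ext4"
    SWAP = "swap"

class Part:
    def __init__(self, device, filesystem):
        self.device, self.filesystem = device, filesystem

def fmt(part):
    if part.filesystem == EFilesystem.EXT4:
        return ["mkfs.ext4 -F {}".format(part.device)]
    elif part.filesystem == EFilesystem.SWAP:
        return ["mkswap -f {}".format(part.device)]
    return None  # no matching filesystem

print(fmt(Part("/dev/sda1", EFilesystem.EXT4)))  # ['mkfs.ext4 -F /dev/sda1']
print(fmt(Part("/dev/sda2", EFilesystem.SWAP)))  # ['mkswap -f /dev/sda2']
```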
Python | def makeUnixUser(user, config):
"""
return a list of commands needed to build the user
"""
groups = listToString(user.groups)
return [config["USERADD"].format(getEncryptedPassword(user.password), groups, user.shell, user.name)] | def makeUnixUser(user, config):
"""
return a list of commands needed to build the user
"""
groups = listToString(user.groups)
return [config["USERADD"].format(getEncryptedPassword(user.password), groups, user.shell, user.name)] |
Python | def mountAll(parts, mountpoint=config.MOUNTPOINT):
"""
@parts = a list of partitions
@mountpoint = the place to mount the partitions
Mount a list of partitions in the right order:
first root is mounted, then /boot, then the rest.
Return a list of commands
"""
command = []
# Add the root mount first
for part in parts:
if part.mountpoint == "/":
command = concat(command, mount(part, mountpoint))
break
# mount /boot as second (in case there is an efi directory the /boot should be mounted first)
for part in parts:
if part.mountpoint == "/boot":
command = concat(command, mount(part, mountpoint))
# Add all other mounts
for part in parts:
if part.mountpoint != "/" and part.mountpoint != "/boot":
command = concat(command, mount(part, mountpoint))
return command | def mountAll(parts, mountpoint=config.MOUNTPOINT):
"""
@parts = a list of partitions
@mountpoint = the place to mount the partitions
Mount a list of partitions in the right order:
first root is mounted, then /boot, then the rest.
Return a list of commands
"""
command = []
# Add the root mount first
for part in parts:
if part.mountpoint == "/":
command = concat(command, mount(part, mountpoint))
break
# mount /boot as second (in case there is an efi directory the /boot should be mounted first)
for part in parts:
if part.mountpoint == "/boot":
command = concat(command, mount(part, mountpoint))
# Add all other mounts
for part in parts:
if part.mountpoint != "/" and part.mountpoint != "/boot":
command = concat(command, mount(part, mountpoint))
return command |
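A self-contained illustration of the mount ordering (root, then /boot, then the rest), with minimal stand-ins for the partition model and the `mount` helper:

```python
# Stand-ins for the partition model and mount helper, for illustration.
class Part:
    def __init__(self, device, mountpoint):
        self.device, self.mountpoint = device, mountpoint

def mount(part, mountpoint):
    suffix = "" if part.mountpoint == "/" else part.mountpoint
    return ["mount {} {}{}".format(part.device, mountpoint, suffix)]

parts = [Part("/dev/sda3", "/home"), Part("/dev/sda2", "/boot"), Part("/dev/sda1", "/")]
command = []
for p in parts:                      # root first
    if p.mountpoint == "/":
        command += mount(p, "/mnt")
        break
for p in parts:                      # then /boot
    if p.mountpoint == "/boot":
        command += mount(p, "/mnt")
for p in parts:                      # then the rest
    if p.mountpoint not in ("/", "/boot"):
        command += mount(p, "/mnt")
print(command)
# ['mount /dev/sda1 /mnt', 'mount /dev/sda2 /mnt/boot', 'mount /dev/sda3 /mnt/home']
```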
Python | def mount(part, mountpoint):
"""
Mount a partition based on its filesystem/mountpoint
"""
if part.mountpoint == "swap" or part.mountpoint == "[swap]":
return mountswap(part, mountpoint)
elif part.mountpoint == "/":
return mountroot(part, mountpoint)
return mountStandard(part, mountpoint) | def mount(part, mountpoint):
"""
Mount a partition based on its filesystem/mountpoint
"""
if part.mountpoint == "swap" or part.mountpoint == "[swap]":
return mountswap(part, mountpoint)
elif part.mountpoint == "/":
return mountroot(part, mountpoint)
return mountStandard(part, mountpoint) |
Python | def mountStandard(part, mountpoint):
"""
Mount all partitions that are not "special", i.e. not root, swap, etc.
Boot partitions are allowed here
"""
mount = mountpoint + "/" + part.mountpoint
if mountpoint[-1:] == "/" or part.mountpoint[0] == "/":
mount = mountpoint + part.mountpoint
return ["mkdir -p {}".format(mount), "mount {} {}".format(part.device, mount)] | def mountStandard(part, mountpoint):
"""
Mount all partitions that are not "special", i.e. not root, swap, etc.
Boot partitions are allowed here
"""
mount = mountpoint + "/" + part.mountpoint
if mountpoint[-1:] == "/" or part.mountpoint[0] == "/":
mount = mountpoint + part.mountpoint
return ["mkdir -p {}".format(mount), "mount {} {}".format(part.device, mount)] |
Python | def generateExecution(raw, config):
"""
generate an execution from a raw yaml model
"""
steps = []
for step in raw:
steps.append(getStep(step, config))
executor = execution()
executor.steps = steps
return executor | def generateExecution(raw, config):
"""
generate an execution from a raw yaml model
"""
steps = []
for step in raw:
steps.append(getStep(step, config))
executor = execution()
executor.steps = steps
return executor |
Python | def handleEncryptedPartition(part, config):
"""
Convert a partition into a luks volume
The volume is created from @volumes
@volumes = a list of logicvolumes mounted on config.LUKS_DEVICE
The important parameters of @volumes are the mountpoint, name and size
"""
command = ["modprobe dm-crypt", "modprobe dm-mod"]
command.append("printf '{}' | ".format(part.password) +
config["LUKS"].format(part.device))
command.append("printf '{}' | ".format(part.password) +
config["LUKS_OPEN"].format(part.device))
command.append("pvcreate " + config["LUKS_DEVICE"])
command.append("vgcreate {} ".format(
config["LUKS_NAME"]) + config["LUKS_DEVICE"])
for volume in part.volumes:
command.append(
"lvcreate -n {} -L {} {}".format(volume.name, volume.size, config["LUKS_NAME"]))
# add format command for volumes
for volume in part.volumes:
formatting = formatVolume(volume.name, volume.mountpoint, config)
for form in formatting:
command.append(form)
return command | def handleEncryptedPartition(part, config):
"""
Convert a partition into a luks volume
The volume is created from @volumes
@volumes = a list of logicvolumes mounted on config.LUKS_DEVICE
The important parameters of @volumes are the mountpoint, name and size
"""
command = ["modprobe dm-crypt", "modprobe dm-mod"]
command.append("printf '{}' | ".format(part.password) +
config["LUKS"].format(part.device))
command.append("printf '{}' | ".format(part.password) +
config["LUKS_OPEN"].format(part.device))
command.append("pvcreate " + config["LUKS_DEVICE"])
command.append("vgcreate {} ".format(
config["LUKS_NAME"]) + config["LUKS_DEVICE"])
for volume in part.volumes:
command.append(
"lvcreate -n {} -L {} {}".format(volume.name, volume.size, config["LUKS_NAME"]))
# add format command for volumes
for volume in part.volumes:
formatting = formatVolume(volume.name, volume.mountpoint, config)
for form in formatting:
command.append(form)
return command |
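An illustration of the command sequence this generates, using hypothetical config templates (the real templates are defined in config.py; the cryptsetup/LVM invocations shown are the usual forms but should be treated as assumptions):

```python
# Hypothetical config templates standing in for config.py values.
config = {
    "LUKS": "cryptsetup luksFormat {}",
    "LUKS_OPEN": "cryptsetup open {} cryptlvm",
    "LUKS_DEVICE": "/dev/mapper/cryptlvm",
    "LUKS_NAME": "vg0",
}
password, device = "secret", "/dev/sda2"
command = ["modprobe dm-crypt", "modprobe dm-mod"]
command.append("printf '{}' | ".format(password) + config["LUKS"].format(device))
command.append("printf '{}' | ".format(password) + config["LUKS_OPEN"].format(device))
command.append("pvcreate " + config["LUKS_DEVICE"])
command.append("vgcreate {} ".format(config["LUKS_NAME"]) + config["LUKS_DEVICE"])
command.append("lvcreate -n root -L 20G vg0")  # one volume, for illustration
for c in command:
    print(c)
```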
Python | def generateModel(raw, config):
"""
generate a model from a raw yaml model
"""
representation = models()
for dic in raw:
if exists(dic, "bootloader"):
representation.bootloader = generateBootloader(dic["bootloader"])
if exists(dic, "system"):
representation.system = generateSystem(dic["system"], config)
if exists(dic, "chroots") and type(dic["chroots"]) is list:
representation.chroots = generateChroots(dic, config)
if exists(dic, "disks") and type(dic["disks"]) is list:
representation.disks = generateDisks(dic)
if exists(dic, "packages") and type(dic["packages"]) is list:
representation.packages = generatePackages(dic, config)
if exists(dic, "users") and type(dic["users"]) is list:
representation.users = generateUsers(dic, config)
if exists(dic, "scripts") and type(dic["scripts"]) is list:
representation.scripts = generateScripts(dic)
if exists(dic, "network"):
representation.network = generateNetwork(dic["network"])
return representation | def generateModel(raw, config):
"""
generate a model from a raw yaml model
"""
representation = models()
for dic in raw:
if exists(dic, "bootloader"):
representation.bootloader = generateBootloader(dic["bootloader"])
if exists(dic, "system"):
representation.system = generateSystem(dic["system"], config)
if exists(dic, "chroots") and type(dic["chroots"]) is list:
representation.chroots = generateChroots(dic, config)
if exists(dic, "disks") and type(dic["disks"]) is list:
representation.disks = generateDisks(dic)
if exists(dic, "packages") and type(dic["packages"]) is list:
representation.packages = generatePackages(dic, config)
if exists(dic, "users") and type(dic["users"]) is list:
representation.users = generateUsers(dic, config)
if exists(dic, "scripts") and type(dic["scripts"]) is list:
representation.scripts = generateScripts(dic)
if exists(dic, "network"):
representation.network = generateNetwork(dic["network"])
return representation |
Python | def HasNetwork(self, host=config.IP, port=53, timeout=3):
"""
Access the network and see if a connection exists.
No DNS lookup is performed; we simply attempt a raw TCP connection to an IP address
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
(host, port))
return True
except socket.error as ex:
print(ex)
return False | def HasNetwork(self, host=config.IP, port=53, timeout=3):
"""
Access the network and see if a connection exists.
No DNS lookup is performed; we simply attempt a raw TCP connection to an IP address
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
(host, port))
return True
except socket.error as ex:
print(ex)
return False |
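A standalone version of the reachability check; the default host here is a well-known public DNS server IP, standing in for config.IP:

```python
import socket

def has_network(host="8.8.8.8", port=53, timeout=3):
    """Try a plain TCP connection to an IP; no DNS lookup is involved."""
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error as ex:
        print(ex)
        return False

print(has_network())  # True when the machine can open outbound TCP connections
```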
Python | def establishConnectionCommand(self, command=config.WIFI_CONNECT_COMMAND):
"""
Try to interactively make a connection
"""
obj = shell.Command(command).GetReturnCode()
self.bIsConnected = self.HasNetwork() | def establishConnectionCommand(self, command=config.WIFI_CONNECT_COMMAND):
"""
Try to interactively make a connection
"""
obj = shell.Command(command).GetReturnCode()
self.bIsConnected = self.HasNetwork() |
Python | def establishConnection(self, ssid, password, command=config.WIFI_CONNECT_COMMAND_WITH_PASSWORD):
"""
Try to make a basic network connection by ssid, password
"""
res = shell.Command(command.format(ssid, password)).GetStdout()
self.bIsConnected = self.HasNetwork()
return res | def establishConnection(self, ssid, password, command=config.WIFI_CONNECT_COMMAND_WITH_PASSWORD):
"""
Try to make a basic network connection by ssid, password
"""
res = shell.Command(command.format(ssid, password)).GetStdout()
self.bIsConnected = self.HasNetwork()
return res |
Python | def SetColumns(self, columnNames):
"""Define the column names (dictionary keys pointing to columns
within the data frame).
columnNames should be a list of objects (usually strings).
If called more than once, erases previous column-name definitions.
"""
if self.dict != {}:
# remove old column definitions
for oldName in self.dict:
# remove old column-name attributes
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
self.dict = {}
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[i]
# define a new attribute, if possible (for access via x.colName)
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[i]
except IndexError:
pass
self.colNames = columnNames | def SetColumns(self, columnNames):
"""Define the column names (dictionary keys pointing to columns
within the data frame).
columnNames should be a list of objects (usually strings).
If called more than once, erases previous column-name definitions.
"""
if self.dict != {}:
# remove old column definitions
for oldName in self.dict:
# remove old column-name attributes
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
self.dict = {}
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[i]
# define a new attribute, if possible (for access via x.colName)
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[i]
except IndexError:
pass
self.colNames = columnNames |
Python | def SetAltColumns(self, columnNames):
"""Define an additional set of column names (dictionary keys pointing
to columns within the data array) for all columns.
Does not erase previous column-name definitions.
"""
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[i]
# define a new attribute, if possible (for access via x.colName)
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[i]
except IndexError:
pass | def SetAltColumns(self, columnNames):
"""Define an additional set of column names (dictionary keys pointing
to columns within the data array) for all columns.
Does not erase previous column-name definitions.
"""
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[i]
# define a new attribute, if possible (for access via x.colName)
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[i]
except IndexError:
pass |
Python | def ChangeColumnName(self, oldName, newName):
"""Change the name of one of the columns. Change is propagated into
the internal attribute dictionary, so obj.newName will return the
column which obj.oldName formerly returned.
"""
if (oldName in self.colNames):
# replace name in column name list
newList = self.colNames[:]
i_old = newList.index(oldName)
newList.insert(i_old, newName)
newList.remove(oldName)
# store new column names, generate keys and attributes
self.SetColumns(newList)
# clean up internal dictionary by removing old attribute ref
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
else:
msg = "Column name \"%s\" does not exist." % oldName
raise KeyError(msg) | def ChangeColumnName(self, oldName, newName):
"""Change the name of one of the columns. Change is propagated into
the internal attribute dictionary, so obj.newName will return the
column which obj.oldName formerly returned.
"""
if (oldName in self.colNames):
# replace name in column name list
newList = self.colNames[:]
i_old = newList.index(oldName)
newList.insert(i_old, newName)
newList.remove(oldName)
# store new column names, generate keys and attributes
self.SetColumns(newList)
# clean up internal dictionary by removing old attribute ref
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
else:
msg = "Column name \"%s\" does not exist." % oldName
raise KeyError(msg) |
Python | def AddNewColumn(self, dataColumn, columnName=None):
"""Adds a new column to the ListDataFrame, along with the column name,
if supplied. Throws an error if dataColumn is not a list or numpy array;
also throws an error if the length of dataColumns is different from
the existing columns.
"""
if type(dataColumn) not in [list, np.ndarray]:
raise TypeError(error1)
if len(dataColumn) != len(self.data[0]):
raise TypeError(error2)
self.data.append(dataColumn)
self.nCols += 1
if columnName is not None and self.colNames is not None:
columnNames = copy.copy(self.colNames)
columnNames.append(columnName)
self.SetColumns(columnNames) | def AddNewColumn(self, dataColumn, columnName=None):
"""Adds a new column to the ListDataFrame, along with the column name,
if supplied. Throws an error if dataColumn is not a list or numpy array;
also throws an error if the length of dataColumns is different from
the existing columns.
"""
if type(dataColumn) not in [list, np.ndarray]:
raise TypeError(error1)
if len(dataColumn) != len(self.data[0]):
raise TypeError(error2)
self.data.append(dataColumn)
self.nCols += 1
if columnName is not None and self.colNames is not None:
columnNames = copy.copy(self.colNames)
columnNames.append(columnName)
self.SetColumns(columnNames) |
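A usage sketch with a pared-down frame class standing in for the full ListDataFrame (inline error messages replace the module-level error1/error2 strings referenced above):

```python
import copy
import numpy as np

class MiniFrame:
    """Pared-down stand-in for ListDataFrame: a list of columns plus names."""
    def __init__(self, data, colNames):
        self.data, self.colNames, self.nCols = data, colNames, len(data)
    def SetColumns(self, columnNames):
        self.colNames = columnNames
    def AddNewColumn(self, dataColumn, columnName=None):
        if type(dataColumn) not in [list, np.ndarray]:
            raise TypeError("dataColumn must be a list or numpy array")
        if len(dataColumn) != len(self.data[0]):
            raise TypeError("dataColumn length does not match existing columns")
        self.data.append(dataColumn)
        self.nCols += 1
        if columnName is not None and self.colNames is not None:
            columnNames = copy.copy(self.colNames)
            columnNames.append(columnName)
            self.SetColumns(columnNames)

df = MiniFrame([[1, 2, 3]], ["x"])
df.AddNewColumn([4.0, 5.0, 6.0], "y")
print(df.nCols, df.colNames)  # 2 ['x', 'y']
```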
Python | def SetColumns(self, columnNames):
"""Define the column names (dictionary keys pointing to columns
within the data array.
If called more than once, erases previous column-name definitions.
"""
if self.dict != {}:
# remove old column definitions
for oldName in self.dict:
# remove old column-name attributes
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
self.dict = {}
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[:,i]
# define a new attribute, if possible
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[:,i]
except IndexError:
pass | def SetColumns(self, columnNames):
"""Define the column names (dictionary keys pointing to columns
within the data array.
If called more than once, erases previous column-name definitions.
"""
if self.dict != {}:
# remove old column definitions
for oldName in self.dict:
# remove old column-name attributes
oldName_attr = oldName.split()[0].strip()
if oldName_attr in self.__dict__:
junk = self.__dict__.pop(oldName_attr)
self.dict = {}
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[:,i]
# define a new attribute, if possible
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[:,i]
except IndexError:
pass |
Python | def SetAltColumns(self, columnNames):
"""Define an additional set of column names (dictionary keys pointing
to columns within the data array).
Does not erase previous column-name definitions.
"""
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[:,i]
# define a new attribute, if possible
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[:,i]
except IndexError:
pass | def SetAltColumns(self, columnNames):
"""Define an additional set of column names (dictionary keys pointing
to columns within the data array).
Does not erase previous column-name definitions.
"""
for i in range(self.nCols):
try:
colName = columnNames[i]
self.dict[colName] = self.data[:,i]
# define a new attribute, if possible
if type(colName) is str:
colName_attr = colName.split()[0].strip()
self.__dict__[colName_attr] = self.data[:,i]
except IndexError:
pass |
Python | def InsertAndReplace( theList, ii, newItems ):
"""Given a list, replace the entry at index ii with the elements of
newItems (also a list).
"""
nNewItems = len(newItems)
if (ii < 0) or (ii > len(theList)):
msg = "\ndatautils.InsertAndReplace: *** ERROR: requested insert location"
msg += " (index = %d) is < 0 or > length(theList) [%d]" % (ii, len(theList))
msg += "\n"
print(msg)
return
del theList[ii]
for j in reversed(range(nNewItems)): theList.insert(ii, newItems[j]) | def InsertAndReplace( theList, ii, newItems ):
"""Given a list, replace the entry at index ii with the elements of
newItems (also a list).
"""
nNewItems = len(newItems)
if (ii < 0) or (ii > len(theList)):
msg = "\ndatautils.InsertAndReplace: *** ERROR: requested insert location"
msg += " (index = %d) is < 0 or > length(theList) [%d]" % (ii, len(theList))
msg += "\n"
print(msg)
return
del theList[ii]
for j in reversed(range(nNewItems)): theList.insert(ii, newItems[j]) |
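A worked example of the in-place splice: the final loop inserts the new items in reverse so they end up in their original order. The same effect can be had with slice assignment (`theList[ii:ii+1] = newItems`):

```python
def InsertAndReplace(theList, ii, newItems):
    # Replace the element at index ii with the elements of newItems.
    if (ii < 0) or (ii > len(theList)):
        print("insert location out of range")
        return
    del theList[ii]
    for j in reversed(range(len(newItems))):
        theList.insert(ii, newItems[j])

xs = ["a", "b", "c"]
InsertAndReplace(xs, 1, ["b1", "b2", "b3"])
print(xs)  # ['a', 'b1', 'b2', 'b3', 'c']
```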
Python | def AddExtraColumnNames( columnNames, subListColumns, subListLengths, subListSuffixes ):
"""Given a list of column names, process it to replace column names for those
columns which have sub-lists. New column names corresponding to each sub-list
column are inserted in place of the original name, for each such column.
"""
oldColNames = [ columnNames[i] for i in subListColumns ]
nSubListCols = len(subListColumns)
for i in range(nSubListCols):
baseName = oldColNames[i]
nSubLists = subListLengths[i]
# generate suffixes:
if (subListSuffixes is not None):
if (len(subListSuffixes) != nSubLists):
msg = "\tdatautils.AddExtraColumnNames: *** WARNING: number of subListSuffixes"
msg += " elements (%d) != actual number\n\tof sub-lists (%d)" % (len(subListSuffixes), nSubLists)
msg += " for column with orig. name = %s...\n" % (baseName)
msg += "\tNumerical suffixes will be used instead for new column names.\n"
print(msg)
suffixes = [ str(k) for k in range(nSubLists) ]
else:
suffixes = subListSuffixes
else:
suffixes = [ str(k) for k in range(nSubLists) ]
newNames = [ "%s_%s" % (baseName, suffixes[k]) for k in range(nSubLists) ]
insertLoc = columnNames.index(baseName)
InsertAndReplace(columnNames, insertLoc, newNames) | def AddExtraColumnNames( columnNames, subListColumns, subListLengths, subListSuffixes ):
"""Given a list of column names, process it to replace column names for those
columns which have sub-lists. New column names corresponding to each sub-list
column are inserted in place of the original name, for each such column.
"""
oldColNames = [ columnNames[i] for i in subListColumns ]
nSubListCols = len(subListColumns)
for i in range(nSubListCols):
baseName = oldColNames[i]
nSubLists = subListLengths[i]
# generate suffixes:
if (subListSuffixes is not None):
if (len(subListSuffixes) != nSubLists):
msg = "\tdatautils.AddExtraColumnNames: *** WARNING: number of subListSuffixes"
msg += " elements (%d) != actual number\n\tof sub-lists (%d)" % (len(subListSuffixes), nSubLists)
msg += " for column with orig. name = %s...\n" % (baseName)
msg += "\tNumerical suffixes will be used instead for new column names.\n"
print(msg)
suffixes = [ str(k) for k in range(nSubLists) ]
else:
suffixes = subListSuffixes
else:
suffixes = [ str(k) for k in range(nSubLists) ]
newNames = [ "%s_%s" % (baseName, suffixes[k]) for k in range(nSubLists) ]
insertLoc = columnNames.index(baseName)
InsertAndReplace(columnNames, insertLoc, newNames) |
Python | def ColumnToFloats( inputList, blankValue ):
"""Takes a list of numbers in string format and converts them to floating-point,
with blank entries being replaced by blankValue (which should be float).
"""
try:
floatList = np.array(inputList, "Float64")
except ValueError:
# looks like column has some blanks in it
floatList = copy.copy(inputList)
for j in range(len(inputList)):
try:
floatList[j] = float(inputList[j])
except ValueError:
floatList[j] = blankValue
floatList = np.array(floatList)
return floatList | def ColumnToFloats( inputList, blankValue ):
"""Takes a list of numbers in string format and converts them to floating-point,
with blank entries being replaced by blankValue (which should be float).
"""
try:
floatList = np.array(inputList, "Float64")
except ValueError:
# looks like column has some blanks in it
floatList = copy.copy(inputList)
for j in range(len(inputList)):
try:
floatList[j] = float(inputList[j])
except ValueError:
floatList[j] = blankValue
floatList = np.array(floatList)
return floatList |
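A runnable behavior sketch of the blank-handling fallback above: blanks (or any non-numeric entry) fall back to blankValue:

```python
import copy
import numpy as np

def ColumnToFloats(inputList, blankValue):
    try:
        floatList = np.array(inputList, np.float64)
    except ValueError:
        # column has blanks; convert entry by entry
        floatList = copy.copy(inputList)
        for j in range(len(inputList)):
            try:
                floatList[j] = float(inputList[j])
            except ValueError:
                floatList[j] = blankValue
        floatList = np.array(floatList)
    return floatList

print(ColumnToFloats(["1.5", "2.5"], 0.0))      # [1.5 2.5]
print(ColumnToFloats(["1.5", "", "3.0"], 0.0))  # [1.5 0.  3. ]
```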
Python | def ReadCompositeTable( fileName, skip="#", delimiter=None, noConvert=None,
intCols=None, blankVal=0, convertSubLists=False, expandSubLists=False,
dataFrame=False, columnRow=None, subListSuffixes=None ):
"""Function which reads a text datafile and returns a list of columns.
Comments and other lines to skip should start with the "skip" character
(which by default is "#"); column separators are specified with "delimiter"
(default is whitespace).
By default, all columns are converted to 1-D NumPy arrays, unless
the data in that column are non-numeric [only the first row of data is
checked to see which columns might be non-numeric] or the column number
[0-based: first column = 0, 2nd column = 1, etc.] is in the noConvert list.
Numeric columns with column number in intCols (list) are converted to Int64 arrays;
all other numeric columns become Float64 arrays.
blankVal specifies the default number to use for blank values in numerical
columns.
convertSubLists specifies whether embedded sublists (e.g., "{x,y,z}") should be
recognized and processed; if so, each such column becomes a *list of NumPy arrays*;
if convertSubLists=False, then each such column is a list of strings.
If expandSubLists=True, then embedded sublists are converted into extra
columns (this forces convertSubLists to be True).
If dataFrame=True, then the result is a ListDataFrame object.
If columnRow = x, then that line [0-based; first line = 0, etc.] is assumed
to contain column headers and is processed accordingly (only useful if
dataFrame = True as well).
In addition, if columnRow != None, then subListSuffixes can be used to
modify the column names for sublists (newNames[i] = origName + "_" + subListSuffixes[i]);
if subListSuffixes is None [the default], then renamed column names have
"_0", "_1", etc. as suffixes.
"""
subListsFound = False
if noConvert is None:
noConvert = []
if intCols is None:
intCols = []
if expandSubLists is True:
convertSubLists = True
if convertSubLists is True:
subListCols = []
subListLengths = {}
subListLengthList = []
nDataRows = CountDataLinesInFile(fileName, skip=skip)
nAllRows = CountLinesInFile(fileName)
# open file in "universal" mode to convert Mac or DOS line endings to \n
inFile = open(fileName, 'rU')
dlines = [line.rstrip() for line in inFile if len(line.strip()) > 0 and line[0] not in skip ]
# if requested, extract column names
if ((columnRow is not None) and (columnRow >= 0) and (columnRow < nAllRows)):
inFile.seek(0,0) # rewind to beginning of file
i = 0
while (i <= columnRow):
line = inFile.readline()
i += 1
colHeaderLine = line.strip("#")
pp = colHeaderLine.split(delimiter)
colNames = [ p.strip() for p in pp ]
else:
colNames = None
inFile.close()
# Figure out number of columns, which ones are non-numeric, and which have
# sub-lists (if we're allowing for the latter)
pp = dlines[0].split(delimiter)
nInputCols = len(pp)
nonNumberCols = []
for i in range(nInputCols):
if (i not in intCols) and (i not in noConvert):
# check to make sure this column has numbers
try:
x = float(pp[i])
except ValueError:
if convertSubLists is True and pp[i].find("{") >= 0:
# a-ha, this is a column with sublists, so let's convert it
subListsFound = True
subListCols.append(i)
ppp = pp[i].split(",")
nSubLists = len(ppp)
subListLengths[i] = nSubLists
subListLengthList.append(nSubLists)
else:
noConvert.append(i)
# Create the master list of input columns
dataList = []
for i in range(nInputCols):
dataList.append([])
# go through the table and assign entries to individual-column lists
for n in range(nDataRows):
pieces = dlines[n].split(delimiter)
for i in range(nInputCols):
dataList[i].append(pieces[i])
# Now convert columns to NumPy arrays, if possible:
if (not expandSubLists):
# "Normal" approach (if sublists columns exist, then each is stored
# as a list of NumPy arrays); total number of columns is unchanged.
# Note that columns in noConvert are left untouched (as list of strings)
for i in range(nInputCols):
if i in intCols:
dataList[i] = np.array(dataList[i], "Int64")
elif convertSubLists is True and i in subListCols:
dataList[i] = ExtractSubLists(dataList[i], subListLengths[i])
elif i not in noConvert:
# this must, by default, be a floating-point column
dataList[i] = ColumnToFloats(dataList[i], blankVal)
else:
# Alternate approach, where we expand sublists into individual, new columns.
# Have to be careful, since number of columns in dataList will be changing...
# Note that columns in noConvert are left untouched (as list of strings)
nAddedCols = 0
for i_orig in range(nInputCols): # i_orig = index into original (input) columns
ii = i_orig + nAddedCols # ii = index into current version of dataList
if i_orig in intCols:
dataList[ii] = np.array(dataList[ii], "Int64")
elif i_orig in subListCols:
# number of added cols = n(subLists) - 1, bcs. we *remove* original column
nAddedCols += subListLengths[i_orig] - 1
listOfSublists = ExtractSubLists(dataList[ii], subListLengths[i_orig])
InsertAndReplace(dataList, ii, listOfSublists)
elif i_orig not in noConvert:
# this must, by default, be a floating-point column
dataList[ii] = ColumnToFloats(dataList[ii], blankVal)
# OK, if there were sublists *and* we generated extra columns, update
# the colNames list to account for extra columns
if (expandSubLists is True) and (subListsFound is True) and (colNames is not None):
AddExtraColumnNames(colNames, subListCols, subListLengthList, subListSuffixes)
if dataFrame:
return ListDataFrame(dataList, colNames)
else:
return dataList
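# --- Illustrative usage sketch (added; not from the original module) ---
# A minimal example of ReadCompositeTable on a hypothetical whitespace-
# delimited file "mytable.dat" whose first line is a "#"-prefixed header row
# and whose columns include an integer ID column and a sublist column such as
# "{1.2,3.4,5.6}". The file name and column layout are assumptions.
#
# df = ReadCompositeTable("mytable.dat", intCols=[0], convertSubLists=True,
#                         dataFrame=True, columnRow=0)
# print(df.colNames)               # column names parsed from the header row
# ids = df[df.colNames[0]]         # Int64 array (because of intCols=[0])
# sublists = df[df.colNames[2]]    # list of NumPy arrays, one per sublist slot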
Python | def ReadCompositeTableFromText( textLines, skip="#", delimiter=None, noConvert=None,
intCols=None, blankVal=0, convertSubLists=False, expandSubLists=False,
dataFrame=False, columnRow=None, subListSuffixes=None ):
"""Identical to ReadCompositeTable, except that it accepts a list of lines
(each line a string), with the first line assumed to be column headers.
"""
subListsFound = False
if noConvert is None:
noConvert = []
if intCols is None:
intCols = []
if expandSubLists is True:
convertSubLists = True
if convertSubLists is True:
subListCols = []
subListLengths = {}
subListLengthList = []
nAllRows = len(textLines)
dlines = [line.rstrip() for line in textLines if len(line.strip()) > 0 and line[0] not in skip ]
nDataRows = len(dlines)
# if requested, extract column names
if ((columnRow is not None) and (columnRow >= 0) and (columnRow < nAllRows)):
colHeaderLine = textLines[columnRow].strip(skip)
colNames = [ p.strip() for p in colHeaderLine.split(delimiter) ]
else:
colNames = None
# Figure out number of columns, which ones are non-numeric, and which have
# sub-lists (if we're allowing for the latter)
pp = dlines[0].split(delimiter)
nInputCols = len(pp)
nonNumberCols = []
for i in range(nInputCols):
if (i not in intCols) and (i not in noConvert):
# check to make sure this column has numbers
try:
x = float(pp[i])
except ValueError:
if convertSubLists is True and pp[i].find("{") >= 0:
# a-ha, this is a column with sublists, so let's convert it
subListsFound = True
subListCols.append(i)
ppp = pp[i].split(",")
nSubLists = len(ppp)
subListLengths[i] = nSubLists
subListLengthList.append(nSubLists)
else:
noConvert.append(i)
# Create the master list of input columns
dataList = []
for i in range(nInputCols):
dataList.append([])
# go through the table and assign entries to individual-column lists
for n in range(nDataRows):
pieces = dlines[n].split(delimiter)
for i in range(nInputCols):
dataList[i].append(pieces[i])
# Now convert columns to NumPy arrays, if possible:
if (not expandSubLists):
# "Normal" approach (if sublists columns exist, then each is stored
# as a list of NumPy arrays); total number of columns is unchanged.
# Note that columns in noConvert are left untouched (as list of strings)
for i in range(nInputCols):
if i in intCols:
dataList[i] = np.array(dataList[i], "Int64")
elif convertSubLists is True and i in subListCols:
dataList[i] = ExtractSubLists(dataList[i], subListLengths[i])
elif i not in noConvert:
# this must, by default, be a floating-point column
dataList[i] = ColumnToFloats(dataList[i], blankVal)
else:
# Alternate approach, where we expand sublists into individual, new columns.
# Have to be careful, since number of columns in dataList will be changing...
# Note that columns in noConvert are left untouched (as list of strings)
nAddedCols = 0
for i_orig in range(nInputCols): # i_orig = index into original (input) columns
ii = i_orig + nAddedCols # ii = index into current version of dataList
if i_orig in intCols:
dataList[ii] = np.array(dataList[ii], "Int64")
elif i_orig in subListCols:
# number of added cols = n(subLists) - 1, bcs. we *remove* original column
nAddedCols += subListLengths[i_orig] - 1
listOfSublists = ExtractSubLists(dataList[ii], subListLengths[i_orig])
InsertAndReplace(dataList, ii, listOfSublists)
elif i_orig not in noConvert:
# this must, by default, be a floating-point column
dataList[ii] = ColumnToFloats(dataList[ii], blankVal)
# OK, if there were sublists *and* we generated extra columns, update
# the colNames list to account for extra columns
if (expandSubLists is True) and (subListsFound is True) and (colNames is not None):
AddExtraColumnNames(colNames, subListCols, subListLengthList, subListSuffixes)
if dataFrame:
return ListDataFrame(dataList, colNames)
else:
return dataList
Python | def EllipseCircum( a, b ):
"""Calcluates circumference of an ellipse.
Given an ellipse defined by semi-major axis a and semi-minor axis b,
the function returns the circumference of the ellipse, using the
approximation of Ramanujan.
Parameters
----------
a : float
semi-major axis of ellipse
b : float
semi-minor axis of ellipse
Returns
-------
circumference : float
"""
return math.pi * ( 3*(a + b) - math.sqrt( (3*a + b)*(a + 3*b) ) )
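# --- Worked check (added for illustration) ---
# For a == b, Ramanujan's approximation reduces exactly to the circle
# circumference 2*pi*a, which gives a quick sanity check of EllipseCircum;
# for moderate flattening the approximation error is far below 1e-6.
import math
assert abs(EllipseCircum(5.0, 5.0) - 2*math.pi*5.0) < 1e-9
print(EllipseCircum(10.0, 5.0))   # ~48.442 (a circle of r=10 would give ~62.83)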
Python | def EllipseR( a, ellipticity, pa, referencePA ):
"""Calcute radius where vector from center crosses boundary of an ellipse.
Given an ellipse with specified semi-major axis, ellipticity, and
position angle (a, ellipticity, pa), this function returns the radius
from the ellipse center where a vector from the center of the ellipse
at position angle = referencePA intersects the ellipse. Angles are
assumed to be in degrees.
Parameters
----------
a : float
semi-major axis of ellipse
ellipticity : float
ellipse ellipticity (1 - b/a)
pa : float
position angle of ellipse major axis [degrees]
referencePA: float
position angle of vector [degrees]
Returns
-------
radius : float
distance from the ellipse center to ellipse along vector
"""
b = a*(1.0 - ellipticity)
dPA_rad = np.radians(pa - referencePA)
return 1.0 / np.sqrt( (np.cos(dPA_rad)/a)**2 + (np.sin(dPA_rad)/b)**2 )
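# --- Worked check (added for illustration) ---
# Along the major axis (referencePA == pa) EllipseR returns a; along the
# minor axis (referencePA == pa + 90) it returns b = a*(1 - ellipticity):
print(EllipseR(10.0, 0.4, 30.0, 30.0))    # 10.0 (major axis)
print(EllipseR(10.0, 0.4, 30.0, 120.0))   # 6.0  (minor axis)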
Python | def _ReadEllipse_fits( filename ):
"""Read an ellipse-fit table in FITS format.
Utility function to read an IRAF ellipse fit in FITS table format, as generated
by the tables.tcopy task. Output is a tuple of
(dictionary containing the columns, list of lower-cased column names).
Parameters
----------
filename : str
path to FITS table
Returns
-------
(dataDict, columnNames) : tuple of (dict, list)
dataDict = dict mapping column names (str) to 1D numpy.ndarray
columnNames = list of str for column names
"""
tabdat = fits.open(filename)[1].data
dataDict = {}
columnNames = []
for columnDef in tabdat.columns:
colName = columnDef.name.lower()
dataDict[colName] = np.array(columnDef.array)
columnNames.append(colName)
return (dataDict, columnNames)
Python | def _ReadEllipse_tprint( lines ):
"""Read an ellipse-fit table in tprint format. [Deprecated]
Utility function to read an IRAF ellipse fit in tprint-generated text-file
form (supplied as a list of lines read in from the file). Output is a tuple of
(dictionary containing the columns, list of lower-cased column names).
Parameters
----------
lines : list of str
list of lines read from text file
Returns
-------
(dataDict, columnNames) : tuple of (dict, list)
dataDict = dict mapping column names (str) to lists of values
columnNames = list of str for column names
"""
commentlines = [line.strip() for line in lines if line[0] == "#"]
# skip first comment line
nBlocks = int(len(commentlines[1:]) / 2)
columnNameLines = [ commentlines[2*i + 1] for i in range(nBlocks) ]
datalines = [line.strip() for line in lines if line[0] not in ["#", "\n"] ]
nPts = int(len(datalines)/nBlocks)
dataDict = {}
columnNameList = []
for i in range(nBlocks):
pp = columnNameLines[i].split()
colNames = [ name.lower() for name in pp[1:] ]
nCols = len(colNames)
for colname in colNames:
if colname != "row":
dataDict[colname] = []
columnNameList.append(colname)
for j in range(nPts):
dataline = datalines[nPts*i + j]
datapieces = dataline.split()
for cc in range(nCols):
colName = colNames[cc]
dataPiece = datapieces[cc]
if colName != "row":
if colName in integerColumns:
try:
dataVal = int(dataPiece)
except ValueError:
print(dataline)
print(dataPiece)
raise
else:
try:
dataVal = float(dataPiece)
except ValueError as e:
if dataPiece == "INDEF":
dataVal = 0.0
else:
raise ValueError(e)
dataDict[colName].append(dataVal)
return dataDict, columnNameList
Python | def _ReadEllipse_tdump( lines ):
"""Read an ellipse-fit table in tdump format.
Utility function to read an IRAF ellipse fit in tdump-generated text-file
form (supplied as a list of lines read in from the file). Output is a tuple of
(dictionary containing the columns, list of lower-cased column names, name of
original fitted image).
Parameters
----------
lines : list of str
list of lines read from text file
Returns
-------
(dataDict, columnNames, imageName) : tuple of (dict, list, str)
dataDict = dict mapping column names (str) to lists of values
columnNames = list of str for column names
imageName = name of the original fitted image
"""
for i in range(len(lines)):
if lines[i].startswith("IMAGE"):
lastHeader = i
colHeaderlines = lines[0:lastHeader] # very last "header" line is IMAGE name
imageName = lines[lastHeader].split()[-1].strip()
datalines = lines[lastHeader + 1:]
nDataRows = len(datalines)
columnNameList = [ line.split()[0].lower() for line in colHeaderlines ]
nColumns = len(colHeaderlines)
nElements = len(datalines[0].split())
if (nElements != nColumns):
msg = "ERROR: Number of column titles (%d) not equal to number of columns (%d)!" % (nElements, nColumns)
print(msg)
return None, None, None
dataList = []
for i in range(nColumns):
dataList.append([])
# go through the table and assign entries to individual-column lists
for n in range(nDataRows):
pieces = datalines[n].split()
for i in range(nColumns):
colName = columnNameList[i]
dataPiece = pieces[i]
if colName in integerColumns:
try:
dataVal = int(dataPiece)
except ValueError:
print(datalines[n])
print(dataPiece)
raise
else:
try:
dataVal = float(dataPiece)
except ValueError as e:
if dataPiece == "INDEF":
dataVal = 0.0
else:
raise ValueError(e)
dataList[i].append(dataVal)
# create dictionary and assign columns to column names
dataDict = {}
for i in range(nColumns):
dataDict[columnNameList[i]] = dataList[i]
return dataDict, columnNameList, imageName
Python | def ReadEllipse( filename, pix=None, dataFrame=True, correctPA=True,
telPA=None, flip=False, ZP=None, smaUnits=None ):
"""Read in an ellipse-fit table from a file.
Reads in an ellipse fit table file generated by the IRAF STSDAS task ellipse
and stores it in a dictionary or a ListDataFrame [default]. The original column
names are transformed into lower-case (e.g., ELLIP_ERR --> ellip_err). All
columns are converted to 1-D numpy.ndarray objects.
Returns a datautils.ListDataFrame object [default] or (if dataFrame=False), a
Python dict mapping column names to numpy arrays.
The input table is assumed to be a text file which has been generated from the
original STSDAS-table file via either "tprint" or "tdump". The function will
attempt to guess which format is in use from the first few lines of the file.
Alternately, it can be a FITS table generated by "tcopy" (with filename ending
in ".fit", ".fits", ".FIT", or ".FITS").
Parameters
----------
filename : str
path to table file containing output of IRAF ellipse (converted from STSDAS
table form)
pix : float or None, optional
scale for pixels (e.g., arc seconds per pixel)
dataFrame : bool, optional
if True [default], output is datautils.ListDataFrame; otherwise, output is dict
correctPA : bool, optional
if True [default], position angles are corrected to standard astronomical
form
telPA : float or None, optional
orientation of image (position of image +y axis on sky, in degrees E of N)
flip : bool, optional
if True, position angles are flipped about the y-axis
ZP : float or None, optional
effective zero point for converting counts/pixel into mag arcsec^-2
smaUnits : str or None, optional
label describing units for semi-major axis (after applying `pix` conversion)
Returns
-------
ellipseFit : dict or datautils.ListDataFrame
Notes
-----
If correctPA = True [the default], then position angles are corrected
from the raw ellipse-fit values (-90 < pa < +90) to standard astronomical values;
the original position-angle values are stored in a new column named "raw_pa".
By default, this corrects position angles to degrees CCW of the image +y axis.
Alternately, the user can specify the angle of the +y axis on the sky, in
degrees E of N, via the telPA keyword.
If the pixel scale (e.g., in units of arcsec/pixel) is known, it can be supplied
via the pix keyword, and the semi-major axis (as well as semi-minor axis
and equivalent radius) will be converted from pixels to arc seconds.
The original semi-major axis pixel values are stored in an additional column
named "sma_pix".
The following additional columns are automatically generated:
"b" = semi-minor axes of the fitted ellipses
"req" = equivalent radii [sqrt(a*b)] of the fitted ellipses
"q" = axis ratios of the fitted ellipses (= 1 - ellipticity)
If the requested output is a datautils.ListDataFrame object, then a few additional
metadata attributes are defined:
dataframe.tableFile has the path to the original input table file
dataframe.sma_units = "pixels" or "arcsec" (the latter only if the user
specified a pixel scale via the pix keyword) or user-supplied smaUnits
dataframe.units_per_pix has the user-supplied pixel scale (if any)
dataframe.origImage has the original fitted image name -- *if* the input
ellipse-fit file was in tdump format (tprint format does not preserve that
information, nor does converting the table to FITS format with tcopy).
dataframe.zp_sb = user-supplied effective zero point (= None if none was
supplied)
In addition, the ListDataFrame object will have additional alternate
column names "a" for "SMA" and "i" for "INTENS".
"""
# Call internal utility functions to get basic dict mapping column names to
# lists of values and list of column names, based on format of input file
fnameBase = os.path.split(filename)
rootName,ext = os.path.splitext(filename)
if ext in [".fit", ".fits", ".FIT", ".FITS"]:
# FITS table
dataDict, columnNameList = _ReadEllipse_fits(filename)
originalImage = None
else:
# text-file format
lines = open(filename).readlines()
# identify whether it's tprint or tdump output and read it in accordingly
if lines[0].startswith("# Table"):
# table was generated by tprint
dataDict, columnNameList = _ReadEllipse_tprint(lines)
originalImage = None
elif lines[0].startswith("SMA R %7.2f pixel"):
# table was generated by tdump
dataDict, columnNameList, originalImage = _ReadEllipse_tdump(lines)
else:
raise ValueError("Unrecognized ellipse-fit table format in %s!" % filename)
# Post-processing:
# correct the position angles (only if correctPA=True), keeping raw values:
dataDict["raw_pa"] = dataDict["pa"]
if correctPA:
dataDict["pa"] = CorrectPosAngle(dataDict["pa"], telPA, flip)
# provide more useful/predictable intensity-error key:
dataDict["intens_err"] = dataDict["int_err"]
# change SMA value from pixels to arcsec (or other user-defined unit):
if smaUnits is None:
if pix is None:
smaUnits = "pixels"
else:
smaUnits = "arc sec"
dataDict["sma_pix"] = dataDict["sma"]
userPixelScale = None
if pix is None:
pix = 1.0
else:
userPixelScale = pix
dataDict["sma"] = [ pix*sma for sma in dataDict["sma"] ]
# add list of column names in original order
columnNameList.append("raw_pa")
columnNameList.append("sma_pix")
# compute correct higher-order harmonic values, if present:
higherHarmonicsPresent = False
for rawnameHH in HIGHER_HARMONIC_RAWNAMES:
if rawnameHH in columnNameList:
higherHarmonicsPresent = True
if higherHarmonicsPresent is True:
sma = np.array(dataDict["sma_pix"])
gradient = np.array(dataDict["grad"])
sma_times_grad = sma*gradient
for rawHarmonic in HIGHER_HARMONIC_RAWNAMES:
if rawHarmonic in columnNameList:
rawData = np.array(dataDict[rawHarmonic])
rawErr = np.array(dataDict[rawHarmonic + "_err"])
# do conversion using -(raw_amplitude) [e.g., Eq.6 in Ciambur 2015]
scaledData = -rawData / sma_times_grad
scaledErr = rawErr / sma_times_grad
scaledName = SCALED_NAMES[rawHarmonic]
scaledErrName = scaledName + "_err"
dataDict[scaledName] = scaledData
dataDict[scaledErrName] = scaledErr
columnNameList.append(scaledName)
columnNameList.append(scaledErrName)
# Convert all columns to numpy arrays
for cname in columnNameList:
dataDict[cname] = np.array(dataDict[cname])
if ZP is not None:
dataDict["sb"] = ZP - 2.5*np.log10(dataDict['intens'])
columnNameList.append("sb")
# Generate semi-minor axis, axis ratio, equivalent radius req = sqrt(a*b)
dataDict["b"] = (1.0 - dataDict["ellip"]) * dataDict["sma"]
dataDict["q"] = (1.0 - dataDict["ellip"])
dataDict["q_err"] = dataDict["ellip_err"]
dataDict["req"] = EquivRadius(dataDict)
columnNameList.append("b")
columnNameList.append("q")
columnNameList.append("q_err")
columnNameList.append("req")
dataDict["column_list"] = columnNameList
# Convert to datautils.ListDataFrame, if requested:
if dataFrame is True:
frameList = []
for cname in columnNameList:
frameList.append(dataDict[cname])
result = du.ListDataFrame(frameList, columnNameList)
# extra conveniences (aliased column names)
result.AddColumnName("sma", "a")
result.AddColumnName("intens", "i")
# add meta-data
result.tableFile = filename
result.sma_units = smaUnits
result.units_per_pix = userPixelScale
result.origImage = originalImage
result.zp_sb = ZP
else:
result = dataDict
return result
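# --- Illustrative usage sketch (added; not from the original module) ---
# Reading a hypothetical tdump-format ellipse-fit file "ngc1234_efit.dat"
# with a pixel scale of 0.396 arcsec/pixel and a surface-brightness zero
# point of 26.0 (file name and numbers are assumptions for illustration):
#
# efit = ReadEllipse("ngc1234_efit.dat", pix=0.396, ZP=26.0)
# print(efit.sma_units)     # "arc sec"
# sma = efit['sma']         # semi-major axes, arcsec
# mu = efit['sb']           # surface brightness, mag/arcsec^2
# print(efit.origImage)     # original image name (tdump input only)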
Python | def ReplaceColumnsWithConstants( efit, colNameList, colValueList, smarange=None ):
"""Replace values in an ellipse-fit column with constant.
Given an ellipse-fit dictionary, for each column name in colNameList,
replace the existing values with the corresponding constant value in colValueList.
Optionally, the range in semi-major axis for which values are replaced can be
specified via smarange (only semi-major axis values >= smarange[0] and
<= smarange[1] will be affected).
"""
if "sma" in efit.keys():
# IRAF-style ellipse fit
a = efit['sma']
else:
# probably Bender-format ellipse fit
if 'a' in efit.keys():
a = efit['a']
elif 'r' in efit.keys():
a = efit['r']
else:
print("UNABLE TO FIND SMA COLUMN IN ELLIPSE-FIT DICTIONARY!")
return None
nRows = len(a)
N = len(colNameList)
if smarange is not None:
amin = smarange[0]
amax = smarange[1]
iGood = [ i for i in range(nRows) if a[i] >= amin and a[i] <= amax ]
else:
iGood = range(nRows)
for i in iGood:
for j in range(N):
efit[colNameList[j]][i] = colValueList[j]
Python | def MergeEllipseFits( efit1, efit2, transitionRadius ):
"""Merge two ellipse-fit dicts.
Given two IRAF-style ellipse-fit dictionaries (efit1 and efit2), return a
merged ellipse-fit dictionary where data from efit1 is used for a < transitionRadius
and data from efit2 is used for a > transitionRadius.
Works with either plain ellipse-fit dicts or datautils.ListDataFrame objects.
Parameters
----------
efit1, efit2 : dict or datautils.ListDataFrame
objects with ellipse-fit columns
Returns
-------
merged_efit : dict
dict with ellipse fit columns
"""
if type(efit1) is du.ListDataFrame:
dataFrame = True
columnNameList = efit1.colNames
else:
dataFrame = False
columnNameList = efit1["column_list"]
a1 = efit1['sma']
a2 = efit2['sma']
n1 = len(a1)
n2 = len(a2)
# check for bad inputs
if (transitionRadius < a1[0]) or (transitionRadius > a1[-1]):
print("Requested transition radius (%g) is outside boundaries of efit1 (%g--%g)!" % (transitionRadius,
a1[0], a1[-1]))
return None
if (transitionRadius < a2[0]) or (transitionRadius > a2[-1]):
print("Requested transition radius (%g) is outside boundaries of efit2 (%g--%g)!" % (transitionRadius,
a2[0], a2[-1]))
return None
efit1_border = NearestIndex(a1, transitionRadius, noPrint=True)
efit2_border = NearestIndex(a2, transitionRadius, noPrint=True)
end1 = efit1_border[1]
start2 = efit2_border[1]
if (a2[start2] <= a1[end1]):
start2 += 1
newDict = {}
for colName in columnNameList:
efit1vals = efit1[colName]
efit2vals = efit2[colName]
newDict[colName] = np.concatenate((efit1vals[0:end1], efit2vals[start2:]))
newDict["column_list"] = columnNameList
if dataFrame is True:
frameList = []
for cname in columnNameList:
frameList.append(newDict[cname])
result = du.ListDataFrame(frameList, columnNameList)
# extra conveniences (aliased column names)
result.AddColumnName("sma", "a")
result.AddColumnName("intens", "i")
result.sma_units = efit1.sma_units
else:
result = newDict
return result
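# --- Illustrative usage sketch (added; not from the original module) ---
# Merging a high-resolution inner fit with a deeper outer fit at a = 20
# semi-major-axis units; efit_inner and efit_outer stand for two ReadEllipse
# results and are assumptions for illustration:
#
# merged = MergeEllipseFits(efit_inner, efit_outer, 20.0)
# if merged is not None:
#     print(merged['sma'][0], merged['sma'][-1])   # full merged radial range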
Python | def IntensityFromRadius( ellipseFit, radius, ZP=None ):
"""Returns or estimates ellipse-fit intensity at a given radius.
Given an ellipse-fit dictionary or datautils.ListDataFrame object and a
user-specified "radius" (semi-major axis), this function extracts corresponding
intensity level for the ellipse with that semi-major axis (using spline
interpolation to get the intensity value if the radius is not explicitly in the
ellipse-fit object).
Returns intensity (in counts/pixel), unless ZP is specified, in which
case the result is in magnitudes (if ZP converts counts/pixel to mag/arcsec^2,
the result is mag/arcsec^2).
Parameters
----------
ellipseFit : dict or datautils.ListDataFrame object
radius : float or iterable
radius (or list, tuple, or numpy.ndarray) specifying where to estimate intensity
ZP : float or None, optional
zero point; if supplied, the returned value is ZP - 2.5*log10(intensity)
Returns
-------
result : float or numpy.ndarray
Returns float if `radius` was float, numpy.ndarray otherwise
"""
sma = np.array(ellipseFit['sma'])
badInput = False
if np.iterable(radius):
if (min(radius) < sma[0]) or (max(radius) > sma[-1]):
badInput = True
txt = "WARNING: requested radius array contains values outside"
elif (radius < sma[0]) or (radius > sma[-1]):
badInput = True
txt = "WARNING: requested radius (%g) is outside" % radius
if badInput:
txt += " ellipse-fit semi-major axis range (%g--%g)!" % (sma[0], sma[-1])
print(txt)
return None
intensity = np.array(ellipseFit['intens'])
intensity_spline = interpolate.InterpolatedUnivariateSpline(sma, intensity)
newIntensity = intensity_spline(radius)
if ZP is None:
return newIntensity
else:
return (ZP - 2.5*np.log10(newIntensity))
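# --- Illustrative usage sketch (added; not from the original module) ---
# Interpolating intensity (and, with a zero point, surface brightness) at
# a = 12.5; `efit` stands for a ReadEllipse result whose sma range covers
# that radius (an assumption for illustration):
#
# I_125 = IntensityFromRadius(efit, 12.5)             # counts/pixel
# mu_125 = IntensityFromRadius(efit, 12.5, ZP=26.0)   # mag/arcsec^2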
Python | def ValueFromRadius( ellipseFit, radius, value="pa" ):
"""Returns or estimates ellipse-fit value at a given radius.
Given an ellipse-fit dictionary or datautils.ListDataFrame object and a
user-specified "radius" (semi-major axis), this function extracts the
corresponding parameter value for the ellipse with that semi-major axis
(using spline interpolation to get the value if the radius is not explicitly
in the ellipse-fit object).
value = string specifying which value to return (e.g., "ellip", "pa", etc.)
"""
sma = np.array(ellipseFit['sma'])
badInput = False
if np.iterable(radius):
if (min(radius) < sma[0]) or (max(radius) > sma[-1]):
badInput = True
txt = "WARNING: requested radius array contains values outside"
elif (radius < sma[0]) or (radius > sma[-1]):
badInput = True
txt = "WARNING: requested radius (%g) is outside" % radius
if badInput:
txt += " ellipse-fit semi-major axis range (%g--%g)!" % (sma[0], sma[-1])
print(txt)
return None
values = np.array(ellipseFit[value])
values_spline = interpolate.InterpolatedUnivariateSpline(sma, values)
newValue = values_spline(radius)
return newValue
Python | def GetStartParams( ellipseFit, a0=10, printCommand=False, useExactSma=False ):
"""
Extract the necessary start parameters for running doellipse: x0, y0,
pa0, ell0 given the specified semi-major axis (default = 10 pixels).
"""
doellipseTemplate = "doellipse xxx el_xxx {0} {1} {2} {3} {4}"
i0,i1 = NearestIndex(ellipseFit.a, a0, noPrint=True)
d1 = abs(ellipseFit.a[i0] - a0)
d2 = abs(ellipseFit.a[i1] - a0)
if (d1 < d2):
i = i0
else:
i = i1
(x0,y0,a00,pa0,ell0) = (ellipseFit.x0[i], ellipseFit.y0[i], a0,
ellipseFit.pa[i], ellipseFit.ellip[i])
if pa0 > 90:
pa0 = 90 - pa0
if useExactSma is True:
a00 = ellipseFit.a[i]
if printCommand is True:
txt = doellipseTemplate.format(x0,y0,a00,pa0,ell0)
print(txt)
return (x0,y0,a00,pa0,ell0)
Python | def WriteProfile( x, y, outputFilename ):
"""Take two input vectors x and y (integer or float) and write them
to a text file:
x y
"""
nX = len(x)
nY = len(y)
if (nX != nY):
msg = "WARNING: number of elements in x (%d) not same as number" % nX
msg += " of elements in y (%d)!\n" % nY
msg += "Nothing saved.\n"
print(msg)
return
nPts = min([nX, nY])
outf = open(outputFilename, 'w')
for i in range(nPts):
outf.write("%g\t\t%g\n" % (x[i], y[i]))
outf.close()
return
Python | def WriteProfileFromDict( dataDict, outputFilename ):
"""Take a data dictionary from an ellipse fit and save I(a) in a text file:
a I(a)
"""
sma = dataDict["sma"]
I = dataDict["intens"]
WriteProfile(sma, I, outputFilename)
return
Python | def NearestIndex( vector, value, noPrint=False, debug=0 ):
"""Returns nearest two indices to a specified value in a vector.
Given an input list or numpy 1-D array, which is assumed to be
monotonically increasing or decreasing, find the indices of the two points
with values closest to parameter 'value'."""
npts = len(vector)
if (value < min(vector)) or (value > max(vector)):
if noPrint:
return (None, None)
else:
print(" value %f lies outside range of input vector (%g to %g)!" % \
(value, min(vector), max(vector)))
return None
diff1 = vector[1] - vector[0]
if diff1 > 0:
# vector is increasing
Sign = 1
else:
# vector is decreasing
Sign = -1
i1 = i2 = 0
diff = Sign*(value - vector[0])
if debug: print(diff)
for i in range(1, npts):
newdiff = Sign*(value - vector[i])
if debug: print(i, newdiff)
if (newdiff > 0) and (newdiff <= diff):
diff = newdiff
i1 = i
else:
# we just crossed over
i2 = i
break
if noPrint is False:
print(" input_vector[%d,%d] = %g, %g" % (i1, i2, vector[i1], vector[i2]))
return (i1, i2)
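# --- Worked check (added for illustration) ---
# NearestIndex brackets the requested value between two adjacent indices:
i1, i2 = NearestIndex([1.0, 2.0, 4.0, 8.0], 3.0, noPrint=True)
print(i1, i2)   # 1 2 (vector[1]=2.0 and vector[2]=4.0 bracket 3.0)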
Python | def WeightedFlux( dataDict ):
"""Given an input ellipse-fit stored in a dictionary, compute the approximate
total flux."""
sma = dataDict["sma"]
I = dataDict["intens"]
ellipticity = dataDict["ellip"]
npts = len(sma)
# start with flux inside innermost ellipse
innerFlux = dataDict["TFLUX_E"][0]
fluxSum = innerFlux
# now add up flux in elliptical annuli
for j in range(1, npts - 1):
dr = (sma[j + 1] - sma[j - 1])/2.0
# approximation to circumference of ellipse
a = sma[j]
b = a*(1.0 - ellipticity[j])
area = EllipseCircum(a,b) * dr
fluxSum += I[j]*area
# add outermost ellipse
dr = sma[npts - 1] - sma[npts - 2]
a = sma[npts - 1]
b = a*(1.0 - ellipticity[npts - 1])
    area = EllipseCircum(a,b) * dr   # include the annulus width dr, as in the loop above
fluxSum += I[npts - 1]*area
    return fluxSum
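
# WeightedFlux relies on EllipseCircum(), defined elsewhere in this module.
# A minimal sketch of such a helper, assuming Ramanujan's first approximation
# for the perimeter of an ellipse (not necessarily the author's implementation):
def EllipseCircum_sketch(a, b):
    # C ~= pi * [3(a + b) - sqrt((3a + b)(a + 3b))]
    return np.pi * (3.0*(a + b) - np.sqrt((3.0*a + b)*(a + 3.0*b)))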

def EquivRadius( dataDict ):
"""Computes equivalent radius based on semi-major axis and ellipticity.
Given an input ellipse-fit stored in a dictionary, return a numpy array
of the equivalent radius [r_eq = sqrt(a*b)]."""
try:
a = np.array(dataDict["sma"])
except KeyError:
# maybe it's a Bender-format ellipse fit
a = np.array(dataDict["a"])
try:
ellipticity = np.array(dataDict["ellip"])
except KeyError:
        # maybe it's a Bender-format ellipse fit
        ellipticity = np.array(dataDict["eps"])
b = (1.0 - ellipticity)*a
r_eq = np.sqrt(a*b)
    return r_eq
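
# Sanity check: for circular isophotes (ellipticity = 0) the equivalent radius
# reduces to the semi-major axis itself (sample dictionary is illustrative):
fit = {"sma": [1.0, 2.0, 4.0], "ellip": [0.0, 0.0, 0.0]}
print(EquivRadius(fit))   # [1. 2. 4.]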

def PlotHarmonicMomentErrors( efit, m, smaName='sma', errSuffix="_err", xlog=False,
ylog=False, xmark=None, xmarkColor=None, plotColor="k", ymark=None,
ymarkColor='k', smaScale=1.0, useSqrt=True ):
"""NOTE: calculation and plotting of error bars not yet implemented!
"""
a = smaScale * np.array(efit[smaName])
sinHarmonicName = "a%1d" % m
cosHarmonicName = "b%1d" % m
aVals = np.array(efit[sinHarmonicName])
bVals = np.array(efit[cosHarmonicName])
if useSqrt is True:
y = np.sqrt(aVals**2 + bVals**2)
else:
y = aVals**2 + bVals**2
if xlog is True:
# make x-axis tick marks larger
plt.tick_params(axis="x", which="major", length=10)
plt.tick_params(axis="x", which="minor", length=5)
if xlog is True and ylog is False:
plt.semilogx(a, y, color=plotColor)
elif xlog is False and ylog is True:
plt.semilogy(a, y, color=plotColor)
elif xlog is True and ylog is True:
plt.loglog(a, y, color=plotColor)
else:
plt.plot(a, y, color=plotColor)
# plt.errorbar(a, y, yerr=y_err, fmt="o", color=plotColor, ms=2)
if xmark is not None:
if xmarkColor is None:
xmarkColor = plotColor
if type(xmark) in [int, float]:
plt.axvline(xmark, ls="--", color=xmarkColor)
else:
nXmarks = len(xmark)
if type(xmarkColor) is not list:
xmarkColors = [xmarkColor]*nXmarks
else:
xmarkColors = xmarkColor
for i in range(len(xmark)):
plt.axvline(xmark[i], ls="--", color=xmarkColors[i])
if ymark is not None:
if type(ymark) in [int, float]:
plt.axhline(ymark, ls="--", color=ymarkColor)
else:
for y in ymark:
                plt.axhline(y, ls="--", color=ymarkColor)

def GetMaxHarmonic( efit ):
"""Determine highest-order of harmonic amplitudes in an ellipse-fit object"""
# We assume that columns named "ai3_err", "ai4_err", "ai5_err", etc.
# exist, up to "aiM_err", where M is the maximum harmonic number
    momentNums = [int(cname[2:-4]) for cname in efit.colNames
if cname[:2] == "ai" and cname[-4:] == "_err"]
    return max(momentNums)
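
# Illustration of the naming convention GetMaxHarmonic expects; a real
# ellipse-fit object (from ReadEllipse) carries these names in .colNames:
from types import SimpleNamespace
efit_demo = SimpleNamespace(colNames=["sma", "ai3_err", "ai4_err", "ai5_err", "ai6_err"])
print(GetMaxHarmonic(efit_demo))   # 6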

def PlotHigherMoment( efit, m=4, xlog=False, xrange=None, yrange=None,
xmark=None, maintitle=None, xtitle=None, plotColorList=['k', 'r', 'g', 'b'],
noErase=False, labelSize=12, smaScale=1.0 ):
"""Plot 2nd moment of higher-order harmonics (e.g., A_4^2 + B_4^2) for one or more
ellipse-fit objects. Meant for use with IRAF ellipse fits.
efit can be a single ellipse-fit object [generated by ReadEllipse] or a list of
such objects.
Defaults to m=4.
plotColorList = optional list of color specifications for use when plotting
a list of ellipse-fit objects.
labelSize = sizes of x- and y-axis labels (in points)
smaScale = conversion from arc seconds (or pixel) to desired linear scale
(e.g., parsecs or kpc) for the x-axis.
"""
if isinstance(efit, list):
nEfits = len(efit)
plotList = True
else:
plotList = False
if (m < 3) or (m > 8):
if (m < 3):
print("*** WARNING: m < 3 harmonic components not defined! ***")
else:
print("*** WARNING: m > 8 harmonic components not currently supported! ***")
return None
sinHarmonicName = "a%1d" % m
sinHarmonicLabel = "A%1d" % m
cosHarmonicName = "b%1d" % m
if noErase is False:
plt.clf()
if not plotList:
PlotHarmonicMomentErrors(efit, m, xlog=xlog, xmark=xmark, smaScale=smaScale)
else:
for i in range(nEfits):
PlotHarmonicMomentErrors(efit[i], m, xlog=xlog, xmark=xmark,
plotColor=plotColorList[i], smaScale=smaScale)
if xrange is None:
xrange = plt.xlim()
if (xrange is not None):
plt.xlim(xrange[0], xrange[1])
if (yrange is not None):
plt.ylim(yrange[0], yrange[1])
if maintitle is not None:
        plt.title(maintitle)

def DrawOneEllipse( x, y, a, pa, ell, color='r', lw=2, alpha=0.5 ):
"""
Draw an ellipse on a pre-existing plot.
Draws an ellipse on a figure, centered at (x,y) [data coordinates] with
    semi-major axis a, position angle pa [in the usual astronomical sense of CCW
from +y axis], and ellipticity ell.
color, lw, alpha: standard matplotlib parameters for the ellipses
"""
w = 2*a
h = (1 - ell)*w
pa_adj = 90.0 + pa
ellipseObj = Ellipse(xy=(x,y), width=w, height=h, angle=pa_adj,
edgecolor=color, fc='None', lw=lw, alpha=alpha)
ax = plt.gca()
    ax.add_patch(ellipseObj)
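
# Usage sketch: overlay a single ellipse (semi-major axis 50 data units,
# PA = 30 degrees, ellipticity = 0.4) centered at (100, 100) on the current axes:
plt.figure()
plt.xlim(0, 200); plt.ylim(0, 200)
DrawOneEllipse(100, 100, 50, 30.0, 0.4, color='b')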

def DrawEllipses( ellipseFit, start=None, step=None, end=None, xc=0, yc=0,
rawPix=False, color='r', lw=1, alpha=0.5 ):
"""Draw multiple ellipses from an ellipse-fit object on a pre-existing plot.
Given an ellipse-fit object, this function draws ellipses on the current
plot corresponding to the ellipses specified in the ellipse-fit object
ellipseFit: ellipse-fit object (e.g., generated by ReadEllipse)
start: initial index to plot [default = 0]
step: delta-index for plotted ellipses [default = 1]
end: final sma index to plot [default = n_elements in ellipseFit]
xc,yc: reference center coordinates (will be subtracted from ellipse center
coordinates before plotting)
rawPix: True to use raw pixel values instead of sma values from ellipse fit
color, lw, alpha: standard matplotlib parameters for the ellipses
"""
x0 = ellipseFit.x0
y0 = ellipseFit.y0
if rawPix is True:
a = ellipseFit.sma_pix
else:
a = ellipseFit.sma
ell = ellipseFit.ellip
pa = ellipseFit.pa
nEllipses = len(x0)
if start is None:
i0 = 0
else:
i0 = start
if end is None:
i1 = nEllipses
else:
i1 = end
if step is None:
delta_i = 1
else:
delta_i = step
for i in range(i0, i1, delta_i):
DrawOneEllipse(x0[i] - xc, y0[i] - yc, a[i], pa[i], ell[i], color=color, lw=lw,
            alpha=alpha)

def minpoint( vector, value ):
"""For a monotonically increasing vector, returns the smallest index
for which vector[index] > value."""
inds = [i for i in range(len(vector)) if vector[i] > value]
    return inds[0]
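
# Example: the smallest index whose element exceeds the value.
print(minpoint([1, 3, 5, 7], 4))   # 2, since vector[2] = 5 is the first element > 4
# Note: raises IndexError if no element exceeds the value.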

def ReadBenderEllipse( filename, dataFrame=False, headerLine=None, useDefaultColumnNames=True ):
"""Read in an ellipse fit generated by Bender/Saglia code and store it
in a dictionary (or, optionally, a ListDataFrame object). Columns are
converted to 1-D numpy arrays.
headerLine indicates which line contains the column titles (first line = 1,
etc.); the actual data is assumed to start immediately after.
Normally, the function attempts to locate the header line automatically
(first line in file with same number of elements [excluding any initial "#"]
as last line in file). The headerLine keyword is mainly useful for perverse
situations (e.g., there is a line in the header that happens to have 12
words in it).
Because we (currently) don't know how the Bender code handles position
angles, we don't attempt to "correct" the PA.
"""
lines = open(filename).readlines()
nTotalLines = len(lines)
lastLine = lines[-1]
nCols = len(lastLine.split())
# find the header line -- should be first line which has same number of elements
# as the last line in the file
if headerLine is None:
headerString = None
for i in range(nTotalLines):
tmpLine = lines[i].lstrip("#")
if len(tmpLine.split()) == nCols:
headerString = tmpLine
headerLineIndex = i
break
if headerString is None:
print("Unable to find header line!\n")
return None
else:
headerLineIndex = headerLine - 1
headerString = lines[headerLineIndex]
if useDefaultColumnNames:
colheaders = DEFAULT_BENDER_COLUMNS
else:
colheaders = headerString.split()
# get rid of excess space at end, if any
colheaders[-1] = colheaders[-1].strip()
# find first data line:
firstDataLine = None
for j in range(headerLineIndex + 1, nTotalLines):
tmpLine = lines[j]
if len(tmpLine.split()) == nCols:
firstDataLine = j
break
if firstDataLine is None:
print("Unable to find first data line!\n")
return None
dataLines = [line for line in lines[firstDataLine:] if line[0] != "#"]
nDataLines = len(dataLines)
dataDict = {}
for i in range(nCols):
cname = colheaders[i]
dataDict[cname] = np.array([ float(line.split()[i]) for line in dataLines ])
dataDict["req"] = EquivRadius(dataDict)
colheaders.append("req")
# Convert to dataFrame, if requested:
if dataFrame is True:
frameList = []
for cname in colheaders:
frameList.append(dataDict[cname])
result = du.ListDataFrame(frameList, colheaders)
        # extra conveniences
#result.AddColumnName("sma", "a")
#result.AddColumnName("intens", "i")
# add meta-data
result.tableFile = filename
else:
result = dataDict
    return result

def WriteBenderEllipse( ellipseFit, outputFilename ):
"""Given an ellipseFit dictionary containing Bender-format entries (as generated by
ReadBenderEllipse), write the ellipse fit to a text file.
ASSUMES THAT ellipseFit IS ALREADY IN BENDER FORMAT!
"""
outputLines = [COLUMNS_BENDER, "#\n"]
smaKey = 'a'
if 'a' not in ellipseFit.keys():
smaKey = 'r'
nDataLines = len(ellipseFit[smaKey])
for i in range(nDataLines):
a = ellipseFit[smaKey][i]
b = ellipseFit['b'][i]
sb = ellipseFit['sb'][i]
eps = ellipseFit['eps'][i]
deps = ellipseFit['deps'][i]
pa = ellipseFit['pa'][i]
dpa = ellipseFit['dpa'][i]
a2 = ellipseFit['a2'][i]
a4 = ellipseFit['a4'][i]
a6 = ellipseFit['a6'][i]
a8 = ellipseFit['a8'][i]
a10 = ellipseFit['a10'][i]
a12 = ellipseFit['a12'][i]
t = ellipseFit['t'][i]
outLine = OUTPUT_FMT_BENDER_ALL % (a,b,sb,eps,deps,pa,dpa,a2,a4,a6,a8,a10,a12,t)
outputLines.append(outLine)
outf = open(outputFilename, 'w')
for line in outputLines: outf.write(line)
    outf.close()

def ConvertBenderToIraf( benderEfit, dataFrame=True ):
"""Given an ellipse-fit DataFrame object generated by reading a Bender/Saglia
ellipse-fit file, generate a new DataFrame object in IRAF ellipse-fit format.
a4,a6,a8,a10,a12 are converted to IRAF-format B4,B6,etc. (errors for these
    quantities are set to 0, since the Bender-format fit does not provide them).
"""
a = benderEfit['a']
ell = benderEfit['eps']
nPts = len(a)
irafDict = {}
irafDict['sma'] = benderEfit['a']
irafDict['sb'] = benderEfit['sb']
irafDict['ellip'] = benderEfit['eps']
irafDict['ellip_err'] = benderEfit['deps']
irafDict['pa'] = benderEfit['pa']
irafDict['pa_err'] = benderEfit['dpa']
irafDict['b4'] = ConvertHigherOrder_Bender2Iraf(a, ell, benderEfit['a4'])
irafDict['b6'] = ConvertHigherOrder_Bender2Iraf(a, ell, benderEfit['a6'])
irafDict['b8'] = ConvertHigherOrder_Bender2Iraf(a, ell, benderEfit['a8'])
irafDict['b10'] = ConvertHigherOrder_Bender2Iraf(a, ell, benderEfit['a10'])
irafDict['b12'] = ConvertHigherOrder_Bender2Iraf(a, ell, benderEfit['a12'])
# add in missing columns, filled with zeros
for cname in IRAF_COLNAMES_B:
if cname not in BENDER_COLUMNS_AS_IRAF:
irafDict[cname] = np.zeros(nPts)
# Convert to dataFrame, if requested:
if dataFrame is True:
frameList = []
columnNameList = IRAF_COLNAMES_B
for cname in columnNameList:
frameList.append(irafDict[cname])
result = du.ListDataFrame(frameList, columnNameList)
        # extra conveniences
result.AddColumnName("sma", "a")
result.AddColumnName("intens", "i")
else:
result = irafDict
    return result

def ConvertIrafToBender( irafEfit, irafColnames=None, dataFrame=True ):
"""Given an ellipse-fit DataFrame object generated by reading an IRAF
ellipse-fit file, generate a new DataFrame object in (abridged) Bender
ellipse-fit format.
You can *also* use a simple Python dictionary for irafEfit, but in this
case you *must* supply a (possibly empty) list via the keyword irafColnames.
This list needs to contain the keys of any even high-order [cos(m theta), with
m >= 6] terms that are in the input dictionary and should be translated to Bender
ellipse-fit form; if the only higher-order term is m=4 ('b4'), then irafColnames
can be an empty list []. For example, if B6 and B8 terms are present (indexed
    via ['b6'] and ['b8']), then use irafColnames=['b6', 'b8'].
If supplying a Python dictionary instead of an ellipse-fit DataFrame, the
dictionary *must*, at a minimum, have numpy arrays indexed with the following keys:
'a', 'sb', 'ellip', 'ellip_err', 'pa', 'pa_err', and 'b4'
(corresponding to semi-major axis, surface-brightness, ellipticity & error,
position angle & error, and cos(4 theta) term).
B4,B6,etc. are converted to Bender-style a4,a6,etc.
"""
a = irafEfit['a']
ell = irafEfit['ellip']
if irafColnames is None:
irafColnames = irafEfit.colNames
nPts = len(a)
benderDict = {}
benderDict['a'] = irafEfit['a']
benderDict['b'] = a * (1.0 - ell)
benderDict['sb'] = irafEfit['sb']
benderDict['eps'] = irafEfit['ellip']
benderDict['deps'] = irafEfit['ellip_err']
benderDict['pa'] = irafEfit['pa']
benderDict['dpa'] = irafEfit['pa_err']
benderDict['a2'] = np.zeros(nPts)
benderDict['a4'] = ConvertHigherOrder_Iraf2Bender(a, ell, irafEfit['b4'])
if 'b6' in irafColnames:
benderDict['a6'] = ConvertHigherOrder_Iraf2Bender(a, ell, irafEfit['b6'])
else:
benderDict['a6'] = np.zeros(nPts)
if 'b8' in irafColnames:
benderDict['a8'] = ConvertHigherOrder_Iraf2Bender(a, ell, irafEfit['b8'])
else:
benderDict['a8'] = np.zeros(nPts)
if 'b10' in irafColnames:
benderDict['a10'] = ConvertHigherOrder_Iraf2Bender(a, ell, irafEfit['b10'])
else:
benderDict['a10'] = np.zeros(nPts)
if 'b12' in irafColnames:
benderDict['a12'] = ConvertHigherOrder_Iraf2Bender(a, ell, irafEfit['b12'])
else:
benderDict['a12'] = np.zeros(nPts)
benderDict['t'] = np.zeros(nPts)
# Convert to dataFrame, if requested:
if dataFrame is True:
frameList = []
columnNameList = DEFAULT_BENDER_COLUMNS
for cname in columnNameList:
frameList.append(benderDict[cname])
result = du.ListDataFrame(frameList, columnNameList)
else:
result = benderDict
    return result

def testReadEllipse_fluxconv( self ):
"""Test that we convert intensities to arbitrary fluxes, given an input
flux conversion factor."""
for i in range(5):
deltaRel = abs((n5831_flux[i] -
self.n5831efit_fits_sb['flux'][i]) / n5831_flux[i])
self.assertLess(deltaRel, 1e-6)
    # check associated metadata
correct = 100.0
# WARNING: NON-DICT ATTRIBUTE
self.assertEqual(correct, self.n5831efit_fits_sb.fluxconv)
    # check associated metadata
correct = "blah"
# WARNING: NON-DICT ATTRIBUTE
    self.assertEqual(correct, self.n5831efit_fits_sb.flux_units)

def testReadEllipse_Ell( self ):
"""Test that we correctly read in ellipticity values, and also that we
correctly convert these to q (= b/a) and r_eq values. """
# test for ellipticity values
for i in range(5):
deltaRel = abs((n5831_startEll[i] -
self.n5831efit_fits['ellip'][i]) / n5831_startEll[i])
self.assertLess(deltaRel, 1e-6)
# test for axis-ratio values
for i in range(5):
deltaRel = abs((n5831_startq[i] - self.n5831efit_fits['q'][i]) / n5831_startq[i])
self.assertLess(deltaRel, 1e-6)
# test for r_eq values
for i in range(5):
deltaRel = abs((n5831_startReq[i] - self.n5831efit_fits['r_eq'][i]) / n5831_startReq[i])
        self.assertLess(deltaRel, 1e-6)

async def _complete_login(
self,
user_id: str,
login_submission: JsonDict,
create_non_existent_users: bool = False,
ratelimit: bool = True,
auth_provider_id: Optional[str] = None,
should_issue_refresh_token: bool = False,
auth_provider_session_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Called when we've successfully authed the user and now need to
    actually log them in (e.g. create devices). This gets called on
all successful logins.
Applies the ratelimiting for successful login attempts against an
account.
Args:
user_id: ID of the user to register.
login_submission: Dictionary of login information.
create_non_existent_users: Whether to create the user if they don't
exist. Defaults to False.
ratelimit: Whether to ratelimit the login request.
auth_provider_id: The SSO IdP the user used, if any.
should_issue_refresh_token: True if this login should issue
a refresh token alongside the access token.
        auth_provider_session_id: The session ID obtained during login from the SSO IdP.
Returns:
result: Dictionary of account information after successful login.
"""
# Before we actually log them in we check if they've already logged in
# too often. This happens here rather than before as we don't
# necessarily know the user before now.
if ratelimit:
await self._account_ratelimiter.ratelimit(None, user_id.lower())
if create_non_existent_users:
canonical_uid = await self.auth_handler.check_user_exists(user_id)
if not canonical_uid:
canonical_uid = await self.registration_handler.register_user(
localpart=UserID.from_string(user_id).localpart
)
user_id = canonical_uid
device_id = login_submission.get("device_id")
initial_display_name = login_submission.get("initial_device_display_name")
(
device_id,
access_token,
valid_until_ms,
refresh_token,
) = await self.registration_handler.register_device(
user_id,
device_id,
initial_display_name,
auth_provider_id=auth_provider_id,
should_issue_refresh_token=should_issue_refresh_token,
auth_provider_session_id=auth_provider_session_id,
)
result: Dict[str, Any] = {
"user_id": user_id,
"access_token": access_token,
"home_server": self.hs.hostname,
"device_id": device_id,
}
if valid_until_ms is not None:
expires_in_ms = valid_until_ms - self.clock.time_msec()
result["expires_in_ms"] = expires_in_ms
if refresh_token is not None:
result["refresh_token"] = refresh_token
    return result

def scan_links(webpage, maximum, filename, modulus):
"""
    Main function to iterate. (needs a rewrite)
"""
LINKS_INDEXED.append(webpage)
counter = 1
while LINKS_INDEXED:
        for link in list(LINKS_INDEXED):  # iterate over a snapshot; the list is mutated below
LINKS_INDEXED.remove(link)
LINKS_VISITED.append(link)
for href in Search.Search(link).get_links():
if href in LINKS_INDEXED or href in LINKS_VISITED:
pass
else:
LINKS_INDEXED.append(href)
counter += 1
if counter % modulus == 0:
print(str(counter)
+ " | " + str(len(LINKS_INDEXED))
+ " | " + str(len(LINKS_VISITED)))
if counter >= maximum and maximum != -1:
break
if counter >= maximum and maximum != -1:
break
if os.path.exists(filename):
os.remove(filename)
with open(filename, "w+") as _file:
for link in LINKS_INDEXED + LINKS_VISITED:
            _file.write(link + "\n")

def command_line_parser():
"""
    Main function primarily used for the command line interface
"""
parser = ArgumentParser(prog="outlookdisablespamfilter", add_help=False)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument(
"-e",
"--outlook_email",
help="Outlook.com Email Address",
required=True
)
required.add_argument(
"-p",
"--outlook_app_password",
help="Outlook.com App Password",
required=True
)
optional.add_argument(
'-h',
'--help',
action='help',
default=SUPPRESS,
help='show this help message and exit'
)
args = parser.parse_args()
transfer_spam_emails(
outlook_email=args.outlook_email,
outlook_app_password=args.outlook_app_password
    )

def transfer_spam_emails(outlook_email, outlook_app_password, move_to_mailbox="Inbox"):
"""
Transfer spam emails from Junk folder to another mailbox
Args:
outlook_email (str): outlook.com email address like [email protected]
outlook_app_password (str): App password for outlook.com
move_to_mailbox (str): Mailbox to move the messages to (default: Inbox)
"""
server_outlook = "outlook.office365.com"
with imaplib.IMAP4_SSL(host=server_outlook, port=993) as imap_outlook:
_ = imap_outlook.login(
outlook_email,
outlook_app_password
)
_ = imap_outlook.select(
"Junk",
readonly=False
)
print('Fetching messages from Spam Folder ...')
resp, items = imap_outlook.search(None, 'ALL')
msg_nums = items[0].split()
print('%s messages to archive' % len(msg_nums))
for msg_num in msg_nums:
resp, data = imap_outlook.fetch(
msg_num,
"(FLAGS INTERNALDATE BODY.PEEK[])"
)
message = data[0][1]
date = imaplib.Time2Internaldate(
imaplib.Internaldate2tuple(
data[0][0]
)
)
copy_result = imap_outlook.append(
move_to_mailbox,
b"",
date,
message
)
if copy_result[0] == 'OK':
_ = imap_outlook.store(
msg_num,
'+FLAGS',
'\\Deleted'
)
ex = imap_outlook.expunge()
print('expunge status: %s' % ex[0])
if not ex[1][0]:
print('expunge count: 0')
else:
            print('expunge count: %s' % len(ex[1]))
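
# Usage sketch with placeholder credentials (the function expects an
# Outlook app password, per the docstring above):
transfer_spam_emails(
    outlook_email="[email protected]",
    outlook_app_password="app-password-here",
    move_to_mailbox="Inbox"
)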

def simulated_annealing_with_tunable_T(self, problem, map_canvas, schedule=exp_schedule()):
""" Simulated annealing where temperature is taken as user input """
current = Node(problem.initial)
    while True:
T = schedule(self.temperature.get())
if T == 0:
return current.state
neighbors = current.expand(problem)
if not neighbors:
return current.state
next = random.choice(neighbors)
delta_e = problem.value(next.state) - problem.value(current.state)
if delta_e > 0 or probability(math.exp(delta_e / T)):
map_canvas.delete("poly")
current = next
self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
points = []
for city in current.state:
points.append(self.frame_locations[city][0])
points.append(self.frame_locations[city][1])
map_canvas.create_polygon(points, outline='red', width=3, fill='', tag="poly")
map_canvas.update()
            map_canvas.after(self.speed.get())
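
# The default schedule above, exp_schedule(), is defined elsewhere; a sketch
# in the AIMA style (an exponentially decaying temperature, cut off after
# `limit` steps; the parameter values here are assumptions):
def exp_schedule(k=20, lam=0.005, limit=100):
    """One possible schedule function for simulated annealing."""
    return lambda t: (k * math.exp(-lam * t) if t < limit else 0)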

def show_iris(i=0, j=1, k=2):
"""Plots the iris dataset in a 3D plot.
The three axes are given by i, j and k,
which correspond to three of the four iris features."""
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams.update(plt.rcParamsDefault)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
iris = DataSet(name="iris")
buckets = iris.split_values_by_classes()
features = ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"]
f1, f2, f3 = features[i], features[j], features[k]
a_setosa = [v[i] for v in buckets["setosa"]]
b_setosa = [v[j] for v in buckets["setosa"]]
c_setosa = [v[k] for v in buckets["setosa"]]
a_virginica = [v[i] for v in buckets["virginica"]]
b_virginica = [v[j] for v in buckets["virginica"]]
c_virginica = [v[k] for v in buckets["virginica"]]
a_versicolor = [v[i] for v in buckets["versicolor"]]
b_versicolor = [v[j] for v in buckets["versicolor"]]
c_versicolor = [v[k] for v in buckets["versicolor"]]
for c, m, sl, sw, pl in [('b', 's', a_setosa, b_setosa, c_setosa),
('g', '^', a_virginica, b_virginica, c_virginica),
('r', 'o', a_versicolor, b_versicolor, c_versicolor)]:
ax.scatter(sl, sw, pl, c=c, marker=m)
ax.set_xlabel(f1)
ax.set_ylabel(f2)
ax.set_zlabel(f3)
    plt.show()

def make_visualize(slider):
"""Takes an input a sliderand returns callback function
for timer and animation."""
def visualize_callback(Visualize, time_step):
if Visualize is True:
for i in range(slider.min, slider.max + 1):
slider.value = i
time.sleep(float(time_step))
    return visualize_callback

def execute(self, exec_str):
"""Stores the command to be exectued to a list which is used later during update()"""
if not isinstance(exec_str, str):
print("Invalid execution argument:", exec_str)
self.alert("Recieved invalid execution command format")
prefix = "{0}_canvas_object.".format(self.cid)
    self.exec_list.append(prefix + exec_str + ';')

def arc_n(self, xn, yn, rn, start, stop):
"""Similar to arc(), but the dimensions are normalized to fall between 0 and 1
The normalizing factor for radius is selected between width and height by
seeing which is smaller."""
x = round(xn * self.width)
y = round(yn * self.height)
r = round(rn * min(self.width, self.height))
    self.arc(x, y, r, start, stop)

def update(self):
"""Execute the JS code to execute the commands queued by execute()"""
exec_code = "<script>\n" + '\n'.join(self.exec_list) + "\n</script>"
self.exec_list = []
    display_html(exec_code)

def inverse_matrix(X):
"""Inverse a given square matrix of size 2x2"""
assert len(X) == 2
assert len(X[0]) == 2
det = X[0][0] * X[1][1] - X[0][1] * X[1][0]
assert det != 0
inv_mat = scalar_matrix_product(1.0/det, [[X[1][1], -X[0][1]], [-X[1][0], X[0][0]]])
    return inv_mat
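
# Quick check: for det = 10, the closed-form 2x2 inverse gives
X = [[4.0, 7.0], [2.0, 6.0]]
print(inverse_matrix(X))   # [[0.6, -0.7], [-0.2, 0.4]]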

def num_or_str(x):
"""The argument is a string; convert to a number if
possible, or strip it."""
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
            return str(x).strip()
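
# Examples of the coercion order: int first, then float, then stripped string.
num_or_str("42")        # -> 42
num_or_str("3.14")      # -> 3.14
num_or_str("  spam  ")  # -> 'spam'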

def print_table(table, header=None, sep=' ', numfmt='{}'):
"""Print a list of lists as a table, so that columns line up nicely.
header, if specified, will be printed as the first row.
numfmt is the format for all numbers; you might want e.g. '{:.2f}'.
(If you want different formats in different columns,
don't use print_table.) sep is the separator between columns."""
justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
if header:
table.insert(0, header)
table = [[numfmt.format(x) if isnumber(x) else x for x in row]
for row in table]
sizes = list(
map(lambda seq: max(map(len, seq)),
list(zip(*[map(str, row) for row in table]))))
for row in table:
print(sep.join(getattr(
            str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
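
# Usage sketch (sample rows): numbers are right-justified, strings left-justified.
print_table([["NGC 5831", 1.02, 23.9],
             ["NGC 1023", 0.98, 21.4]],
            header=["Galaxy", "scale", "sb"], numfmt='{:.2f}')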

def make_line(map, x0, y0, x1, y1, distance):
'''
This function draws out the lines joining various points.
'''
map.create_line(x0, y0, x1, y1)
    map.create_text((x0 + x1) / 2, (y0 + y1) / 2, text=distance)

def breadth_first_tree_search(problem):
"""Search the shallowest nodes in the search tree first."""
global frontier, counter
if counter == -1:
frontier = FIFOQueue()
    return tree_search(problem)

def depth_first_tree_search(problem):
"""Search the deepest nodes in the search tree first."""
# This search algorithm might not work in case of repeated paths.
global frontier, counter
if counter == -1:
frontier = Stack()
    return tree_search(problem)

def depth_first_graph_search(problem):
"""Search the deepest nodes in the search tree first."""
global frontier, counter
if counter == -1:
frontier = Stack()
    return graph_search(problem)