Python | def generic_mo_from_xml(xml_str):
"""
create GenericMo object from xml string
"""
root_elem = ET.fromstring(xml_str)
class_id = root_elem.tag
gmo = GenericMo(class_id)
gmo.from_xml(root_elem)
return gmo
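A minimal usage sketch for the helper above, assuming a module-level `import xml.etree.ElementTree as ET` and that `GenericMo` comes from `ucscsdk.ucscmo`; the `lsServer` tag and its attributes are made-up sample data.

```python
# Hypothetical managed-object XML; any tag with attributes works the same way.
xml = '<lsServer dn="org-root/ls-demo" name="demo" descr="sample"/>'

gmo = generic_mo_from_xml(xml)

# from_xml() copies every XML attribute onto the object, so they are
# reachable as plain Python attributes afterwards.
print(gmo.dn)    # org-root/ls-demo
print(gmo.name)  # demo
```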
Python | def generic_mo_from_xml_elem(elem):
"""
create GenericMo object from xml element
"""
from . import ucscxmlcodec as xc
xml_str = xc.to_xml_str(elem)
gmo = generic_mo_from_xml(xml_str)
return gmo
Python | def to_xml(self, xml_doc=None, option=None):
"""
This method returns the xml element node for the current object
with its hierarchy.
Args:
xml_doc: document to which the Mo attributes are added.
Can be None.
option: not required for Generic Mo class object
Example:
from ucscsdk.ucscmo import GenericMo\n
args = {"a": 1, "b": 2, "c":3}\n
obj = GenericMo("testLsA", "org-root", **args)\n
obj1 = GenericMo("testLsB", "org-root", **args)\n
obj.add_child(obj1)\n
elem = obj.write_xml()\n
import ucscsdk.ucscxmlcodec as xc\n
xc.to_xml_str(elem)\n
Output:
'<testLsA a="1" b="2" c="3" dn="org-root/" rn="">\n
<testLsB a="1" b="2" c="3" dn="org-root/" rn="" />\n
</testLsA>'
"""
if xml_doc is None:
xml_obj = Element(ucscgenutils.word_l(self._class_id))
else:
xml_obj = SubElement(xml_doc, ucscgenutils.word_l(
self._class_id))
for key in self.__dict__:
if not key.startswith('_'):
xml_obj.set(key, getattr(self, key))
self.child_to_xml(xml_obj)
return xml_obj
Python | def from_xml(self, elem, handle=None):
"""
This method forms objects out of an xml element.
This is called internally from the ucscxmlcodec.from_xml_str
method.
Example:
xml = '<testLsA a="1" b="2" c="3" dn="org-root/" rn="">
<testLsB a="1" b="2" c="3" dn="org-root/" rn="" /></testLsA>'\n
obj = xc.from_xml_str(xml)\n
print type(obj)\n
Outputs:
<class 'ucscsdk.ucscmo.GenericMo'>
"""
if elem is None:
return None
self._handle = handle
self._class_id = elem.tag
if elem.attrib:
for name, value in ucscgenutils.iteritems(elem.attrib):
self.__dict__[name] = value
self.__properties[name] = str(value)
if self.rn and self.dn:
pass
elif self.rn and not self.dn:
if self.__parent_dn is not None and self.__parent_dn != "":
self.dn = self.__parent_dn + '/' + self.rn
self.__properties['dn'] = self.dn
else:
self.dn = self.rn
self.__properties['dn'] = self.dn
elif not self.rn and self.dn:
self.rn = os.path.basename(self.dn)
self.__properties['rn'] = self.rn
# else:
# raise ValueError("Both rn and dn does not present.")
children = list(elem)
if children:
for child in children:
if not ET.iselement(child):
continue
class_id = ucscgenutils.word_u(child.tag)
pdn = None
if 'dn' in dir(self):
pdn = self.dn
child_obj = GenericMo(class_id, parent_mo_or_dn=pdn)
self.child_add(child_obj)
child_obj.from_xml(child, handle)
Python | def __get_mo_obj(self, class_id):
"""
Internal method to create a managed object from class_id
"""
import inspect
mo_class = ucsccoreutils.load_class(class_id)
mo_class_params = inspect.getargspec(mo_class.__init__)[0][2:]
mo_class_param_dict = {}
for param in mo_class_params:
mo_param = mo_class.prop_meta[param].xml_attribute
if mo_param not in self.__properties:
if 'rn' in self.__properties:
rn_str = self.__properties['rn']
elif 'dn' in self.__properties:
rn_str = os.path.basename(self.__properties['dn'])
rn_pattern = mo_class.mo_meta.rn
np_dict = ucsccoreutils.get_naming_props(rn_str,
rn_pattern)
if param not in np_dict:
mo_class_param_dict[param] = ""
else:
mo_class_param_dict[param] = np_dict[param]
else:
mo_class_param_dict[param] = self.__properties[mo_param]
p_dn = ""
if 'topRoot' in mo_class.mo_meta.parents:
mo_obj = mo_class(**mo_class_param_dict)
else:
mo_obj = mo_class(parent_mo_or_dn=p_dn, **mo_class_param_dict)
return mo_obj
Python | def from_xml(self, elem, handle=None):
"""This method creates the object from the xml representation
of the Method object."""
self._handle = handle
if elem.attrib:
for attr_name, attr_value in ucscgenutils.iteritems(elem.attrib):
self.attr_set(
ucscgenutils.convert_to_python_var_name(attr_name),
str(attr_value))
child_elems = list(elem)
if child_elems:
for child_elem in child_elems:
if not ET.iselement(child_elem):
continue
cln = ucscgenutils.word_u(child_elem.tag)
child = ucsccoreutils.get_ucsc_obj(cln, child_elem)
self._child.append(child)
child.from_xml(child_elem, handle)
Python | def download_image_by_asset_path(asset_path, output_folder):
"""
Downloads an individual image, given its asset path, and saves it to output_folder.
Returns a list of the downloaded image or images
"""
### Get the download URL from EE
image = ee.Image(asset_path) # this approach comes from https://github.com/google/earthengine-api/blob/master/python/examples/py/Image/download.py
path = image.getDownloadUrl({
#'scale': 30,
#'crs': 'EPSG:3310',
#'region': '[[-120, 35], [-119, 35], [-119, 34], [-120, 34]]'
})
### Do some name management things
output_name = os.path.split(asset_path)[1]
zipfile = output_name + ".zip"
download_path = os.path.join(output_folder, zipfile) # comes as a zip file with a .tfw
# check that the output path exists - create it if not
makedirs(output_folder)
c = Curler()
success = c.download(url=path, path=download_path)
if not success:
raise RuntimeError("Unable to retrieve file at {} - please check in your browser and try again (make sure to log into EE first).".format(path))
### Extract the Zip and delete it
if not is_zipfile(download_path):
raise RuntimeError("Downloaded file was not a zip file!")
with open(download_path, 'rb') as zf:  # ZipFile needs a binary-mode file object
z = ZipFile(zf)
downloaded_items = [os.path.join(output_folder,item) for item in z.namelist() if not item.endswith("tfw") ]
z.extractall(path=output_folder)
z.close()
del z
os.remove(download_path)
return downloaded_items
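A hedged usage sketch, assuming the `earthengine-api` client is installed and already authenticated; the asset id and output directory are placeholders, not values taken from the code above.

```python
import ee

ee.Initialize()  # requires prior authentication with Earth Engine

# Hypothetical image asset and local output folder.
files = download_image_by_asset_path("users/example/some_image", "/tmp/ee_downloads")
for path in files:
    print("downloaded:", path)
```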
Python | def download_images_in_collection(collection_id, output_folder, max_items=max_items):
"""
Downloads images in ImageCollection specified by collection_id and saves them into
the location specified in output folder, up to max_items downloads. Set max_items
to a very high number to download all items.
Returns a list with the full paths to the downloaded images.
"""
### Get all of the items in the collection
collection = ee.ImageCollection(collection_id)
collection_items = collection.toList(max_items).getInfo()
downloaded_items = []
for item in collection_items:
downloaded_items += download_image_by_asset_path(item["id"], output_folder) # extend the list with the new list that's produced - don't append
return downloaded_items
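A matching sketch for the collection-level wrapper; the collection id is again a placeholder, and `max_items` simply caps how many images are fetched.

```python
downloaded = download_images_in_collection(
    "users/example/some_collection",  # hypothetical ImageCollection asset id
    "/tmp/ee_downloads",
    max_items=5,                      # fetch at most five images
)
print(len(downloaded), "files downloaded")
```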
Python | def process_pldmtool_output(process):
""" Ensure pldmtool runs without error and if it does fail, detect that and
show the pldmtool exit status and its stderr.
A simpler implementation would just wait for the pldmtool exit status
prior to attempting to decode its stdout. Instead, optimize for the
no error case and allow the json decoder to consume pldmtool stdout as
soon as it is available (in parallel). This results in the following
error scenarios:
- pldmtool fails and the decoder fails
Ignore the decoder fail and throw PLDMToolError.
- pldmtool fails and the decoder doesn't fail
Throw PLDMToolError.
- pldmtool doesn't fail and the decoder does fail
This is a pldmtool bug - re-throw the decoder error.
Parameters:
process: A Process object providing process control functions like
wait, and access functions such as reading stdout and
stderr.
"""
status = 0
try:
data = json.load(process.stdout)
# it's unlikely, but possible, that pldmtool failed but still wrote a
# valid json document - so check for that.
status = process.wait()
if status == 0:
return data
except json.decoder.JSONDecodeError:
# pldmtool wrote an invalid json document. Check to see if it had
# non-zero exit status.
status = process.wait()
if status == 0:
# pldmtool didn't have non zero exit status, so it wrote an invalid
# json document and the JSONDecodeError is the correct error.
raise
# pldmtool had a non-zero exit status, so throw an error for that, possibly
# discarding a spurious JSONDecodeError exception.
raise PLDMToolError(status, "".join(process.stderr))
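A minimal sketch of the `process` argument's expected shape, built directly on `subprocess.Popen` (the real script wraps this in its own executor classes); the exact pldmtool subcommand and arguments are assumptions.

```python
import subprocess

# Assumed pldmtool invocation; any command that writes JSON to stdout fits the interface.
proc = subprocess.Popen(
    ["pldmtool", "platform", "getpdr", "-d", "1"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    universal_newlines=True,  # text-mode streams so json.load() and "".join() work
)
pdr = process_pldmtool_output(proc)  # raises PLDMToolError on a non-zero exit
print(pdr)
```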
Python | def fetch_pdrs_from_bmc(executor):
""" This is the core function that would fire the getPDR pldmtool command
and it then agreegates the data received from all the calls into the
respective dictionaries based on the PDR Type.
Parameters:
executor: executor object for running pldmtool
"""
entity_association_pdr = {}
state_sensor_pdr = {}
state_effecter_pdr = {}
numeric_pdr = {}
fru_record_set_pdr = {}
tl_pdr = {}
for handle_number, my_dic in get_pdrs(executor):
if sys.stdout.isatty():
sys.stdout.write(
"Fetching PDR's from BMC : %8d\r" % (handle_number))
sys.stdout.flush()
if my_dic["PDRType"] == "Entity Association PDR":
entity_association_pdr[handle_number] = my_dic
if my_dic["PDRType"] == "State Sensor PDR":
state_sensor_pdr[handle_number] = my_dic
if my_dic["PDRType"] == "State Effecter PDR":
state_effecter_pdr[handle_number] = my_dic
if my_dic["PDRType"] == "FRU Record Set PDR":
fru_record_set_pdr[handle_number] = my_dic
if my_dic["PDRType"] == "Terminus Locator PDR":
tl_pdr[handle_number] = my_dic
if my_dic["PDRType"] == "Numeric Effecter PDR":
numeric_pdr[handle_number] = my_dic
executor.close()
total_pdrs = len(entity_association_pdr.keys()) + len(tl_pdr.keys()) + \
len(state_effecter_pdr.keys()) + len(numeric_pdr.keys()) + \
len(state_sensor_pdr.keys()) + len(fru_record_set_pdr.keys())
print("\nSuccessfully fetched " + str(total_pdrs) + " PDR\'s")
print("Number of FRU Record PDR's : ", len(fru_record_set_pdr.keys()))
print("Number of TerminusLocator PDR's : ", len(tl_pdr.keys()))
print("Number of State Sensor PDR's : ", len(state_sensor_pdr.keys()))
print("Number of State Effecter PDR's : ", len(state_effecter_pdr.keys()))
print("Number of Numeric Effecter PDR's : ", len(numeric_pdr.keys()))
print("Number of Entity Association PDR's : ",
len(entity_association_pdr.keys()))
return (entity_association_pdr, state_sensor_pdr,
state_effecter_pdr, len(fru_record_set_pdr.keys()))
Python | def main():
""" Create a summary table capturing the information of all the PDR's
from the BMC & also create a diagram that captures the entity
association hierarchy."""
parser = argparse.ArgumentParser(prog='pldm_visualise_pdrs.py')
parser.add_argument('--bmc', type=str, help="BMC IPAddress/BMC Hostname")
parser.add_argument('--user', type=str, help="BMC username")
parser.add_argument('--password', type=str, help="BMC Password")
parser.add_argument('--port', type=int, help="BMC SSH port",
default=22)
args = parser.parse_args()
extra_cfg = {}
if args.bmc:
try:
with open(os.path.expanduser("~/.ssh/config")) as f:
ssh_config = paramiko.SSHConfig()
ssh_config.parse(f)
host_config = ssh_config.lookup(args.bmc)
if host_config:
if 'hostname' in host_config:
args.bmc = host_config['hostname']
if 'user' in host_config and args.user is None:
args.user = host_config['user']
if 'proxycommand' in host_config:
extra_cfg['sock'] = paramiko.ProxyCommand(
host_config['proxycommand'])
except FileNotFoundError:
pass
executor = ParamikoExecutor(
args.bmc, args.user, args.password, args.port, **extra_cfg)
elif shutil.which('pldmtool'):
executor = SubprocessExecutor()
else:
sys.exit("Can't find any PDRs: specify remote BMC with --bmc or "
"install pldmtool.")
association_pdr, state_sensor_pdr, state_effecter_pdr, counter = \
fetch_pdrs_from_bmc(executor)
draw_entity_associations(association_pdr, counter)
prepare_summary_report(state_sensor_pdr, state_effecter_pdr)
Python | def image_modification_date_from_exif(imagename):
"""
Set the modification/creation date of the image to its EXIF data.
:param imagename: name of the image to handle
"""
if os.path.exists(imagename):
print("Parsing {0}...".format(imagename))
filestream = open(imagename, 'rb')
tags = exifread.process_file(filestream)
start_time = tags.get('EXIF DateTimeOriginal')
print(start_time)
timestamp = datetime.strptime(start_time.values, "%Y:%m:%d %H:%M:%S")
mt = int(time.mktime(timestamp.timetuple()))
os.utime(imagename, (mt, mt))
return timestamp
return None
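A small usage sketch, assuming `exifread` is installed and the files actually carry an `EXIF DateTimeOriginal` tag; the photo directory is a placeholder.

```python
import glob
import os

# Hypothetical photo directory; adjust as needed.
for image in glob.glob(os.path.join("/tmp/photos", "*.jpg")):
    taken = image_modification_date_from_exif(image)
    if taken is not None:
        print(image, "->", taken.isoformat())
```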
Python | def _generate_calc_job(folder, entry_point_name, inputs=None):
"""Fixture to generate a mock ``CalcInfo`` for testing calculation jobs."""
from aiida.engine.utils import instantiate_process
from aiida.manage.manager import get_manager
from aiida.plugins import CalculationFactory
manager = get_manager()
runner = manager.get_runner()
process_class = CalculationFactory(entry_point_name)
process = instantiate_process(runner, process_class, **inputs)
calc_info = process.prepare_for_submission(folder)
return calc_info
Python | def generate_calc_job_node():
"""Fixture to generate a mock `CalcJobNode` for testing parsers."""
from aiida import orm
import collections # pylint: disable=syntax-error
def flatten_inputs(inputs, prefix=''):
"""This function follows roughly the same logic
as `aiida.engine.processes.process::Process._flatten_inputs`."""
flat_inputs = []
for key, value in inputs.items():
if isinstance(value, collections.abc.Mapping):
flat_inputs.extend(
flatten_inputs(value, prefix=prefix + key + '__'))
else:
flat_inputs.append((prefix + key, value))
return flat_inputs
def _generate_calc_job_node(entry_point_name,
computer,
test_name=None,
inputs=None,
attributes=None):
"""Fixture to generate a mock `CalcJobNode` for testing parsers.
:param entry_point_name: entry point name of the calculation class
:param computer: a `Computer` instance
:param test_name: relative path of directory
:param inputs: any optional nodes to add as input links to the current CalcJobNode
:param attributes: any optional attributes to set on the node
:return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
"""
# pylint: disable=too-many-locals
import os
from aiida.common import LinkType
from aiida.plugins.entry_point import format_entry_point_string
entry_point = format_entry_point_string('aiida.calculations',
entry_point_name)
node = orm.CalcJobNode(computer=computer, process_type=entry_point)
node.set_option('resources', {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
})
node.set_option('max_wallclock_seconds', 1800)
if attributes:
node.set_attribute_many(attributes)
if inputs:
metadata = inputs.pop('metadata', {})
options = metadata.get('options', {})
for name, option in options.items():
node.set_option(name, option)
for link_label, input_node in flatten_inputs(inputs):
input_node.store()
node.add_incoming(input_node,
link_type=LinkType.INPUT_CALC,
link_label=link_label)
node.store()
if test_name is not None:
basepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(basepath, 'parsers', 'fixtures', 'catmap',
test_name)
retrieved = orm.FolderData()
retrieved.put_object_from_tree(filepath)
retrieved.add_incoming(node,
link_type=LinkType.CREATE,
link_label='retrieved')
retrieved.store()
remote_folder = orm.RemoteData(computer=computer,
remote_path='/tmp')
remote_folder.add_incoming(node,
link_type=LinkType.CREATE,
link_label='remote_folder')
remote_folder.store()
return node
return _generate_calc_job_node
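A hedged pytest-style sketch of how such a fixture is typically consumed, assuming AiiDA's bundled `aiida_localhost` computer fixture is available and that a `'catmap'` entry point plus a `parsers/fixtures/catmap/default` directory exist; none of that is confirmed by the snippet itself.

```python
def test_generate_calc_job_node(generate_calc_job_node, aiida_localhost):
    # 'catmap' and 'default' mirror the entry point and fixture folder
    # hard-coded in the factory above; both are assumptions here.
    node = generate_calc_job_node('catmap', aiida_localhost, test_name='default')
    assert node.process_type == 'aiida.calculations:catmap'
    assert node.get_option('max_wallclock_seconds') == 1800
```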
Python | def _generate_calc_job_node(entry_point_name,
computer,
test_name=None,
inputs=None,
attributes=None):
"""Fixture to generate a mock `CalcJobNode` for testing parsers.
:param entry_point_name: entry point name of the calculation class
:param computer: a `Computer` instance
:param test_name: relative path of directory
:param inputs: any optional nodes to add as input links to the current CalcJobNode
:param attributes: any optional attributes to set on the node
:return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node
"""
# pylint: disable=too-many-locals
import os
from aiida.common import LinkType
from aiida.plugins.entry_point import format_entry_point_string
entry_point = format_entry_point_string('aiida.calculations',
entry_point_name)
node = orm.CalcJobNode(computer=computer, process_type=entry_point)
node.set_option('resources', {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
})
node.set_option('max_wallclock_seconds', 1800)
if attributes:
node.set_attribute_many(attributes)
if inputs:
metadata = inputs.pop('metadata', {})
options = metadata.get('options', {})
for name, option in options.items():
node.set_option(name, option)
for link_label, input_node in flatten_inputs(inputs):
input_node.store()
node.add_incoming(input_node,
link_type=LinkType.INPUT_CALC,
link_label=link_label)
node.store()
if test_name is not None:
basepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(basepath, 'parsers', 'fixtures', 'catmap',
test_name)
retrieved = orm.FolderData()
retrieved.put_object_from_tree(filepath)
retrieved.add_incoming(node,
link_type=LinkType.CREATE,
link_label='retrieved')
retrieved.store()
remote_folder = orm.RemoteData(computer=computer,
remote_path='/tmp')
remote_folder.add_incoming(node,
link_type=LinkType.CREATE,
link_label='remote_folder')
remote_folder.store()
return node
Python | def parse(self, **kwargs): # pylint: disable=too-many-locals, inconsistent-return-statements
"""
Parse outputs, store results in database.
:returns: an exit code, if parsing fails (or nothing if parsing succeeds)
"""
from aiida.orm import SinglefileData, List
output_filename = self.node.get_option('output_filename')
pickle_filename = self.node.inputs.data_file.value
# Check that folder content is as expected
files_retrieved = self.retrieved.list_object_names()
files_expected = [output_filename, pickle_filename]
# Note: set(A) <= set(B) checks whether A is a subset of B
if not set(files_expected) <= set(files_retrieved):
self.logger.error(
f"Found files '{files_retrieved}', expected to find '{files_expected}'"
)
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
# add output file
self.logger.info(f"Parsing '{output_filename}'")
with self.retrieved.open(output_filename, 'rb') as handle:
output_node = SinglefileData(file=handle)
self.out('log', output_node)
# Parsing the pickle file
self.logger.info(f"Parsing '{pickle_filename}'")
# pickledata = pickle.load(self.retrieved.open(pickle_filename, 'rb'))
with self.retrieved.open(pickle_filename, 'rb') as handle:
pickledata = pickle.load(handle)
try:
coverage_data = [[a[0], list(map(float, a[1]))]
for a in pickledata['coverage_map']]
except KeyError:
return self.exit_codes.ERROR_NO_PICKLE_FILE
## Choose not to change the mpmath format
## the downside is that mpmath must then be present
## wherever this is being parsed
rate_data = [[a[0], list(map(float, a[1]))]
for a in pickledata['rate_map']]
production_data = [[a[0], list(map(float, a[1]))]
for a in pickledata['production_rate_map']]
coverage_map = List(list=coverage_data)
rate_map = List(list=rate_data)
production_map = List(list=production_data)
## The three main outputs
## The solution to the kinetic model - coverages
## The rate and the production rate also provided
self.out('coverage_map', coverage_map)
self.out('rate_map', rate_map)
self.out('production_rate_map', production_map)
Python | def define(cls, spec):
"""Define inputs and outputs of the calculation."""
# yapf: disable
super(CatMAPCalculation, cls).define(spec)
# set default values for AiiDA-CatMAP options
# Right now the tool allows only serial runs this might change
spec.inputs['metadata']['options']['resources'].default = {
'num_machines': 1,
'num_mpiprocs_per_machine': 1,
}
# new ports
## INPUTS
### Decide if you are doing electrocatalysis or thermal catalysis; not a catmap input
spec.input('electrocatal', valid_type=Bool, help='If this is an electrocatalysis run, specify here', default=lambda: Bool(True))
### Reaction condition keys
spec.input('energies', valid_type=SinglefileData, help='energies.txt that stores all the energy inputs')
spec.input('scaler', valid_type=Str, help='Scaler to be used in the Kinetic model', default=lambda: Str('GeneralizedLinearScaler'))
spec.input('rxn_expressions', valid_type=List, help='Reactions expressions')
spec.input('surface_names', valid_type=List, help='Surfaces to calculate with energies in energies.txt')
spec.input('descriptor_names', valid_type=List, help='Descriptors')
spec.input('descriptor_ranges', valid_type=List, help='List of lists which has the two ranges')
spec.input('resolution', valid_type=Int, help='Resolution of calculation')
spec.input('temperature', valid_type=Float, help='temperature to run calculation at')
spec.input('species_definitions', valid_type=Dict, help='Dict consisting of all species definitions')
spec.input('gas_thermo_mode', valid_type=Str, help='Gas thermodynamics mode')
spec.input('adsorbate_thermo_mode', valid_type=Str, help='Adsorbate thermodynamics mode')
spec.input('scaling_constraint_dict', valid_type=Dict, help='Scaling constraints', required=False)
spec.input('numerical_solver', valid_type=Str, help='Numerical solver to be used', required=False, default=lambda: Str('coverages'))
spec.input('decimal_precision', valid_type=Int, help='Decimal precision of code')
spec.input('tolerance', valid_type=Float, help='Tolerance of calculation')
spec.input('max_rootfinding_iterations', valid_type=Int, help='Maximum root finding iterations')
spec.input('max_bisections', valid_type=Int, help='Maximum bisections for root finding algorithm')
spec.input('mkm_filename', valid_type=Str, required=False, default=lambda: Str(cls._INPUT_FILE_NAME))
spec.input('data_file', valid_type=Str, required=False, default=lambda: Str('aiida.pickle'))
spec.input('ideal_gas_params', valid_type=Dict, required=False, help='Ideal gas parameters to interface with ASE')
### Keys for electrochemistry
spec.input('voltage', valid_type=Float, required=False, help='Potential on an SHE scale')
spec.input('electrochemical_thermo_mode', valid_type=List, help='Electrochemical thermodynamics mode', required=False)
spec.input('pH', valid_type=Float, required=False, help='pH')
spec.input('beta', valid_type=Float, required=False, default=lambda: Float(0.5))
spec.input('potential_reference_scale', valid_type=Str, required=False, default=lambda: Str('SHE'))
spec.input('extrapolated_potential', valid_type=Float, required=False, default=lambda: Float(0.0))
spec.input('voltage_diff_drop', valid_type=Float, required=False, default=lambda: Float(0.0))
spec.input('sigma_input', valid_type=List, required=False, default=lambda: List(list=['CH', 0]))
spec.input('Upzc', valid_type=Float, required=False, default=lambda: Float(0.0))
## METADATA
spec.inputs['metadata']['options']['parser_name'].default = 'catmap'
spec.inputs['metadata']['options']['input_filename'].default = 'mkm_job.py'
spec.inputs['metadata']['options']['output_filename'].default = 'aiida.out'
## OUTPUTS
spec.output('log', valid_type=SinglefileData, help='Log file from CatMAP')
spec.output('coverage_map', valid_type=List, help='Coverage Map generated after a completed CatMAP run')
spec.output('rate_map', valid_type=List, help='Rate Map generated after a completed CatMAP run')
spec.output('production_rate_map', valid_type=List, help='Production Rate Map generated after a completed CatMAP run')
spec.exit_code(100, 'ERROR_MISSING_OUTPUT_FILES', message='Calculation did not produce all expected output files.')
spec.exit_code(500, 'ERROR_NO_PICKLE_FILE', message='No information stored in the pickle file')
Python | def pulsing_light(strip, wait_ms=50, iterations=10):
"""Show a pulsing light with increasing and decreasing circular brightness """
import math
position = 0
for i in range(strip.numPixels() * 2):
position = position+1
for j in range(strip.numPixels()):
strip.setPixelColor(j,Color(round(((math.sin(j+position) * 127 + 128)/255)*255),round(((math.sin(j+position) * 127 + 128) /255)*100), round(((math.sin(j+position) * 127 + 128) /255)*100)))
strip.show()
time.sleep(wait_ms/1000.0)
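The LED animation helpers in this group all expect an initialised `rpi_ws281x` strip object; here is a minimal setup sketch, where the LED count, GPIO pin and brightness are assumptions for a typical Raspberry Pi wiring.

```python
# The functions above also rely on a module-level `Color` import from the same package.
from rpi_ws281x import PixelStrip, Color

LED_COUNT = 30        # number of LEDs on the strip (assumed)
LED_PIN = 18          # PWM-capable GPIO pin (assumed wiring)
LED_BRIGHTNESS = 128  # 0..255

strip = PixelStrip(LED_COUNT, LED_PIN, brightness=LED_BRIGHTNESS)
strip.begin()         # must be called once before any show()/setPixelColor()

pulsing_light(strip, wait_ms=50)
```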
Python | def strobe(strip, wait_ms=400, strobe_count=7, pulse_count=12):
from random import randrange
"""Shows an bright light reflex with different pulse"""
for strobe in range(strobe_count):
for pulse in range(pulse_count):
for i in range(strip.numPixels()):
strip.setPixelColorRGB(i, 255,255,255)
strip.show()
time.sleep(randrange(0,45,1)/1000.0)
for i in range(strip.numPixels()):
strip.setPixelColorRGB(i, 0,0,0)
strip.show()
time.sleep(wait_ms/1000.0)
Python | def snow_sparkle(strip,sparkle_delay=20):
"""Shows different pixel flickering on LED Strip"""
from random import randint
pixel = randint(0, strip.numPixels() - 1)  # randint is inclusive, so cap at the last valid index
speed_delay=randint(100,1000)
for i in range(strip.numPixels()):
strip.setPixelColor(i, Color(0x10,0x10,0x10))
strip.show()
time.sleep(speed_delay/1000.0)
strip.setPixelColorRGB(pixel, 255,255,255)
strip.show()
time.sleep(sparkle_delay/1000.0)
Python | def bouncing_balls(strip, playtime, ball_count=2, wait_ms=200):
"""Shows an accelerated pixel with physical behavour like a ball in a flipper game"""
import time, math
start_time = time.time()
ClockTimeSinceLastBounce = [0 for i in range(ball_count)]
StartHeight=1
for i in range(ball_count):
ClockTimeSinceLastBounce[i] = time.time()
Height = [0 for i in range(ball_count)]
Position = [0 for i in range(ball_count)]
ImpactVelocity = [0 for i in range(ball_count)]
ImpactVelocityStart= math.sqrt(-2 * -9.81 * 1)
Dampening = [0 for i in range(ball_count)]
TimeSinceLastBounce = [0 for i in range(ball_count)]
for i in range(0,ball_count,1):
last_ClockTimeSinceLastBounce = ClockTimeSinceLastBounce[i]
ClockTimeSinceLastBounce[i] = time.time() - last_ClockTimeSinceLastBounce
Height[i] = StartHeight
Position[i] = 0
ImpactVelocity[i] = math.sqrt(-2 * -9.81 * 1)
TimeSinceLastBounce[i] = 0
Dampening[i] = 0.90 - (float(i)/(ball_count**2))
act_time = time.time()
while ((act_time+ playtime)> time.time()):
for i in range(ball_count):
TimeSinceLastBounce[i] = time.time() - ClockTimeSinceLastBounce[i]
Height[i] = 0.5 * (-9.81) * (TimeSinceLastBounce[i]**2) + ImpactVelocity[i] * TimeSinceLastBounce[i]
if (Height[i] < 0):
Height[i] = 0
ImpactVelocity[i] = Dampening[i] * ImpactVelocity[i]
ClockTimeSinceLastBounce[i] = time.time()
if (ImpactVelocity[i] < 0.01):
ImpactVelocity[i] = ImpactVelocityStart
Position[i] = round(Height[i] * (strip.numPixels()-1)/StartHeight)  # convert the relative height into an absolute LED index
for i in range(ball_count):
strip.setPixelColorRGB(Position[i], 0, 0,255)
strip.show()
for i in range(strip.numPixels()):
strip.setPixelColorRGB(i, 0,0,0)
Python | def solve_dare(A, B, Q, R):
"""
Solve a discrete-time algebraic Riccati equation (DARE)
"""
x = Q
x_next = Q
max_iter = 150
eps = 0.01
for i in range(max_iter):
# x_next = A.T @ x @ A - A.T @ x @ B @ la.inv(R + B.T @ x @ B) @ B.T @ x @ A + Q
x_next = np.dot(np.dot(A.T, x), A) - np.dot(np.dot(np.dot(np.dot(np.dot(np.dot(A.T, x), B), la.inv(R + np.dot(np.dot(B.T, x), B))), B.T), x), A) + Q
if (abs(x_next - x)).max() < eps:
break
x = x_next
return x_next
Python | def dlqr(A, B, Q, R):
"""Solve the discrete time lqr controller.
x[k+1] = A x[k] + B u[k]
cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]
# ref Bertsekas, p.151
"""
# first, try to solve the Riccati equation
X = solve_dare(A, B, Q, R)
# compute the LQR gain
# K = la.inv(B.T @ X @ B + R) @ (B.T @ X @ A)
K = np.dot(la.inv(np.dot(np.dot(B.T, X), B) + R), np.dot(np.dot(B.T, X), A))
# eig_result = la.eig(A - B @ K)
eig_result = la.eig(A - np.dot(B, K))
return K, X, eig_result[0]
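A small self-contained check of the two functions above on a discrete double integrator, assuming the module-level `numpy` and `numpy.linalg` imports are named `np` and `la` as the function bodies require.

```python
import numpy as np
import numpy.linalg as la  # solve_dare/dlqr only need la.inv and la.eig

dt = 0.1
A = np.array([[1.0, dt],
              [0.0, 1.0]])  # double-integrator dynamics
B = np.array([[0.0],
              [dt]])
Q = np.eye(2)                # state cost
R = np.eye(1)                # input cost

K, X, eigvals = dlqr(A, B, Q, R)
print("LQR gain K:", K)
print("closed-loop eigenvalues:", eigvals)  # should lie inside the unit circle
```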
Python | def disconnect(self: 'TelegramClient'):
"""
Disconnects from Telegram.
If the event loop is already running, this method returns a
coroutine that you should await on your own code; otherwise
the loop is run until said coroutine completes.
Example
.. code-block:: python
# You don't need to use this if you used "with client"
await client.disconnect()
"""
if self._loop.is_running():
return self._disconnect_coro()
else:
try:
self._loop.run_until_complete(self._disconnect_coro())
except RuntimeError:
# Python 3.5.x complains when called from
# `__aexit__` and there were pending updates with:
# "Event loop stopped before Future completed."
#
# However, it doesn't really make a lot of sense.
pass
Python | async def _disconnect(self: 'TelegramClient'):
"""
Disconnect only, without closing the session. Used in reconnections
to different data centers, where we don't want to close the session
file; user disconnects, however, should close it, since it means that
their job with the client is complete and we should clean it all up.
"""
await self._sender.disconnect()
await helpers._cancel(self._log[__name__],
updates_handle=self._updates_handle)
Python | async def _switch_dc(self: 'TelegramClient', new_dc):
"""
Permanently switches the current connection to the new data center.
"""
self._log[__name__].info('Reconnecting to new data center %s', new_dc)
dc = await self._get_dc(new_dc)
await self.session.set_dc(dc.id, dc.ip_address, dc.port)
# auth_key's are associated with a server, which has now changed
# so it's not valid anymore. Set to None to force recreating it.
self._sender.auth_key.key = None
await self.session.set_auth_key(None)
await self.session.save()
await self._disconnect()
return await self.connect() | async def _switch_dc(self: 'TelegramClient', new_dc):
"""
Permanently switches the current connection to the new data center.
"""
self._log[__name__].info('Reconnecting to new data center %s', new_dc)
dc = await self._get_dc(new_dc)
await self.session.set_dc(dc.id, dc.ip_address, dc.port)
# auth_key's are associated with a server, which has now changed
# so it's not valid anymore. Set to None to force recreating it.
self._sender.auth_key.key = None
await self.session.set_auth_key(None)
await self.session.save()
await self._disconnect()
return await self.connect() |
Python | async def _auth_key_callback(self: 'TelegramClient', auth_key):
"""
Callback from the sender whenever it needed to generate a
new authorization key. This means we are not authorized.
"""
await self.session.set_auth_key(auth_key)
await self.session.save() | async def _auth_key_callback(self: 'TelegramClient', auth_key):
"""
Callback from the sender whenever it needed to generate a
new authorization key. This means we are not authorized.
"""
await self.session.set_auth_key(auth_key)
await self.session.save() |
Python | async def _borrow_exported_sender(self: 'TelegramClient', dc_id):
"""
Borrows a connected `MTProtoSender` for the given `dc_id`.
If it's not cached, creates a new one if it doesn't exist yet,
and imports a freshly exported authorization key for it to be usable.
    Once its job is over it should be returned with `_return_exported_sender`.
"""
async with self._borrow_sender_lock:
self._log[__name__].debug('Borrowing sender for dc_id %d', dc_id)
state, sender = self._borrowed_senders.get(dc_id, (None, None))
if state is None:
state = _ExportState()
sender = await self._create_exported_sender(dc_id)
sender.dc_id = dc_id
self._borrowed_senders[dc_id] = (state, sender)
elif state.need_connect():
dc = await self._get_dc(dc_id)
await sender.connect(self._connection(
dc.ip_address,
dc.port,
dc.id,
loop=self._loop,
loggers=self._log,
proxy=self._proxy
))
state.add_borrow()
return sender | async def _borrow_exported_sender(self: 'TelegramClient', dc_id):
"""
Borrows a connected `MTProtoSender` for the given `dc_id`.
If it's not cached, creates a new one if it doesn't exist yet,
and imports a freshly exported authorization key for it to be usable.
    Once its job is over it should be returned with `_return_exported_sender`.
"""
async with self._borrow_sender_lock:
self._log[__name__].debug('Borrowing sender for dc_id %d', dc_id)
state, sender = self._borrowed_senders.get(dc_id, (None, None))
if state is None:
state = _ExportState()
sender = await self._create_exported_sender(dc_id)
sender.dc_id = dc_id
self._borrowed_senders[dc_id] = (state, sender)
elif state.need_connect():
dc = await self._get_dc(dc_id)
await sender.connect(self._connection(
dc.ip_address,
dc.port,
dc.id,
loop=self._loop,
loggers=self._log,
proxy=self._proxy
))
state.add_borrow()
return sender |
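The borrow/return bookkeeping above relies on a small reference-counting state object; _ExportState is internal and not shown here, so the following is a hypothetical sketch of that pattern rather than the library's actual class.
class ExportStateSketch:
    """Hypothetical reconstruction of the borrow-counting pattern used above."""
    def __init__(self):
        self._borrowed = 0        # senders currently lent out
        self._connected = False   # whether the underlying sender is connected

    def need_connect(self):
        return not self._connected

    def add_borrow(self):
        self._borrowed += 1
        self._connected = True

    def add_return(self):
        self._borrowed -= 1

    def should_disconnect(self):
        return self._connected and self._borrowed <= 0

    def mark_disconnected(self):
        self._connected = False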
Python | async def _return_exported_sender(self: 'TelegramClient', sender):
"""
Returns a borrowed exported sender. If all borrows have
been returned, the sender is cleanly disconnected.
"""
async with self._borrow_sender_lock:
self._log[__name__].debug('Returning borrowed sender for dc_id %d', sender.dc_id)
state, _ = self._borrowed_senders[sender.dc_id]
state.add_return() | async def _return_exported_sender(self: 'TelegramClient', sender):
"""
Returns a borrowed exported sender. If all borrows have
been returned, the sender is cleanly disconnected.
"""
async with self._borrow_sender_lock:
self._log[__name__].debug('Returning borrowed sender for dc_id %d', sender.dc_id)
state, _ = self._borrowed_senders[sender.dc_id]
state.add_return() |
Python | async def _clean_exported_senders(self: 'TelegramClient'):
"""
Cleans-up all unused exported senders by disconnecting them.
"""
async with self._borrow_sender_lock:
for dc_id, (state, sender) in self._borrowed_senders.items():
if state.should_disconnect():
self._log[__name__].info(
'Disconnecting borrowed sender for DC %d', dc_id)
# Disconnect should never raise
await sender.disconnect()
state.mark_disconnected() | async def _clean_exported_senders(self: 'TelegramClient'):
"""
Cleans-up all unused exported senders by disconnecting them.
"""
async with self._borrow_sender_lock:
for dc_id, (state, sender) in self._borrowed_senders.items():
if state.should_disconnect():
self._log[__name__].info(
'Disconnecting borrowed sender for DC %d', dc_id)
# Disconnect should never raise
await sender.disconnect()
state.mark_disconnected() |
Python | async def dc_id(self):
"""
Returns the currently-used data center ID.
"""
raise NotImplementedError | async def dc_id(self):
"""
Returns the currently-used data center ID.
"""
raise NotImplementedError |
Python | async def server_address(self):
"""
Returns the server address where the library should connect to.
"""
raise NotImplementedError | async def server_address(self):
"""
Returns the server address where the library should connect to.
"""
raise NotImplementedError |
Python | async def port(self):
"""
Returns the port to which the library should connect to.
"""
raise NotImplementedError | async def port(self):
"""
Returns the port to which the library should connect to.
"""
raise NotImplementedError |
Python | async def takeout_id(self):
"""
Returns an ID of the takeout process initialized for this session,
    or `None` if there were no unfinished takeout requests.
"""
raise NotImplementedError | async def takeout_id(self):
"""
Returns an ID of the takeout process initialized for this session,
    or `None` if there were no unfinished takeout requests.
"""
raise NotImplementedError |
Python | async def list_sessions(cls):
"""
Lists available sessions. Not used by the library itself.
"""
return [] | async def list_sessions(cls):
"""
Lists available sessions. Not used by the library itself.
"""
return [] |
Python | async def cache_file(self, md5_digest, file_size, instance):
"""
Caches the given file information persistently, so that it
doesn't need to be re-uploaded in case the file is used again.
The ``instance`` will be either an ``InputPhoto`` or ``InputDocument``,
both with an ``.id`` and ``.access_hash`` attributes.
"""
raise NotImplementedError | async def cache_file(self, md5_digest, file_size, instance):
"""
Caches the given file information persistently, so that it
doesn't need to be re-uploaded in case the file is used again.
The ``instance`` will be either an ``InputPhoto`` or ``InputDocument``,
both with an ``.id`` and ``.access_hash`` attributes.
"""
raise NotImplementedError |
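The methods above belong to an abstract session interface; the sketch below is a hypothetical in-memory implementation covering only the members shown in this section, with the base class and the rest of the interface left out.
class MemorySessionSketch:
    """Hypothetical in-memory stand-in for the abstract members above."""
    def __init__(self):
        self._dc_id = None
        self._server_address = None
        self._port = None
        self._takeout_id = None
        self._file_cache = {}   # (md5_digest, file_size) -> InputPhoto/InputDocument

    async def dc_id(self):
        return self._dc_id

    async def server_address(self):
        return self._server_address

    async def port(self):
        return self._port

    async def takeout_id(self):
        return self._takeout_id

    @classmethod
    async def list_sessions(cls):
        return []

    async def cache_file(self, md5_digest, file_size, instance):
        # Keyed on content hash and size so a re-upload of the same file can be skipped.
        self._file_cache[(md5_digest, file_size)] = instance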
Python | def with_pool(
cls,
asyncpg_pool: asyncpg.pool.Pool,
session_id_factory: Callable[[], str] = default_session_id_factory,
save_entities: bool = True
) -> 'AsyncpgSession':
"""
    Alternative constructor that builds the session around an already created shared pool
:param asyncpg_pool: ready asyncpg Pool
:param session_id_factory: ...
:param save_entities: ...
:return: instance of AsyncpgSession
"""
self = cls(
asyncpg_conf=None,
session_id_factory=session_id_factory,
save_entities=save_entities
) # type: ignore
self._pool = asyncpg_pool
return self | def with_pool(
cls,
asyncpg_pool: asyncpg.pool.Pool,
session_id_factory: Callable[[], str] = default_session_id_factory,
save_entities: bool = True
) -> 'AsyncpgSession':
"""
    Alternative constructor that builds the session around an already created shared pool
:param asyncpg_pool: ready asyncpg Pool
:param session_id_factory: ...
:param save_entities: ...
:return: instance of AsyncpgSession
"""
self = cls(
asyncpg_conf=None,
session_id_factory=session_id_factory,
save_entities=save_entities
) # type: ignore
self._pool = asyncpg_pool
return self |
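A brief usage sketch of the alternate constructor above, assuming with_pool is exposed as a classmethod (as its cls parameter suggests); the DSN is a placeholder.
import asyncio
import asyncpg

async def build_session():
    # Placeholder DSN; point it at the real database.
    pool = await asyncpg.create_pool(dsn='postgresql://user:secret@localhost/telethon')
    return AsyncpgSession.with_pool(pool, save_entities=True)

session = asyncio.run(build_session())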
Python | async def process_entities(self, tlo):
"""Processes all the found entities on the given TLObject,
    unless .save_entities is False.
Returns True if new input entities were added.
"""
if not self.save_entities:
return
rows = self._entities_to_rows(tlo)
if not rows:
return
query = """
insert into asyncpg_telethon.entities(session_id, id, hash, username, phone, name)
values ($1,$2,$3,$4,$5,$6)
on conflict(session_id, id) do
update set id = $2, hash = $3, username = $4, phone = $5, name = $6
where entities.session_id = $1;
"""
for n, row in enumerate(rows):
row = list(row)
row.insert(0, self._session_id)
rows[n] = row
async with self._pool.acquire() as conn: # type: asyncpg.Connection
await conn.executemany(query, rows) | async def process_entities(self, tlo):
"""Processes all the found entities on the given TLObject,
    unless .save_entities is False.
Returns True if new input entities were added.
"""
if not self.save_entities:
return
rows = self._entities_to_rows(tlo)
if not rows:
return
query = """
insert into asyncpg_telethon.entities(session_id, id, hash, username, phone, name)
values ($1,$2,$3,$4,$5,$6)
on conflict(session_id, id) do
update set id = $2, hash = $3, username = $4, phone = $5, name = $6
where entities.session_id = $1;
"""
for n, row in enumerate(rows):
row = list(row)
row.insert(0, self._session_id)
rows[n] = row
async with self._pool.acquire() as conn: # type: asyncpg.Connection
await conn.executemany(query, rows) |
Python | async def _get_entities_by_x(self, coln: str, colval: str) -> List[asyncpg.Record]:
"""
    _get_entities_by_x should never be called from outside.
"""
if coln not in ALLOWED_ENTITY_IDENTIFIER_NAMES:
        raise RuntimeWarning(f"{coln!s} is not a valid column name for an entity lookup")
    # coln is validated against the whitelist above, so it is safe to interpolate it;
    # identifiers cannot be passed as query parameters in asyncpg.
    query = f"""
    select id, hash from asyncpg_telethon.entities
    where
    entities.session_id = $1 and {coln} = $2;
    """
    async with self._pool.acquire() as conn:  # type: asyncpg.Connection
        return await conn.fetch(query, self._session_id, colval)
"""
    _get_entities_by_x should never be called from outside.
"""
if coln not in ALLOWED_ENTITY_IDENTIFIER_NAMES:
        raise RuntimeWarning(f"{coln!s} is not a valid column name for an entity lookup")
    # coln is validated against the whitelist above, so it is safe to interpolate it;
    # identifiers cannot be passed as query parameters in asyncpg.
    query = f"""
    select id, hash from asyncpg_telethon.entities
    where
    entities.session_id = $1 and {coln} = $2;
    """
    async with self._pool.acquire() as conn:  # type: asyncpg.Connection
        return await conn.fetch(query, self._session_id, colval)
Python | def render(root, con, panel, cells):
"""Renders the world of cells depending on their state."""
for x in range(WORLD_WIDTH):
for y in range(WORLD_HEIGHT):
if cells[x, y] == 1:
con.draw_char(x, y, None, bg=COLOR_ON)
else:
con.draw_char(x, y, None, bg=COLOR_OFF)
panel.draw_rect(0,0,None,None,None,bg=(97,117,197))
panel.draw_str(42,2, 'Generation: ' + str(GENERATION), bg=(97,117,197))
root.blit(panel,0, PANEL_Y, CONSOLE_WIDTH, PANEL_HEIGHT, 0, 0)
root.blit(con, 0, 0, WORLD_WIDTH, WORLD_HEIGHT, 0, 0) | def render(root, con, panel, cells):
"""Renders the world of cells depending on their state."""
for x in range(WORLD_WIDTH):
for y in range(WORLD_HEIGHT):
if cells[x, y] == 1:
con.draw_char(x, y, None, bg=COLOR_ON)
else:
con.draw_char(x, y, None, bg=COLOR_OFF)
panel.draw_rect(0,0,None,None,None,bg=(97,117,197))
panel.draw_str(42,2, 'Generation: ' + str(GENERATION), bg=(97,117,197))
root.blit(panel,0, PANEL_Y, CONSOLE_WIDTH, PANEL_HEIGHT, 0, 0)
root.blit(con, 0, 0, WORLD_WIDTH, WORLD_HEIGHT, 0, 0) |
Python | def update(cells):
"""Updates the cell state using conway's rules."""
global GENERATION
new_cells = cells.copy()
for x in range(WORLD_WIDTH):
for y in range(WORLD_HEIGHT):
# Sums the total of neighbors that are ON
            neighbors = int(cells[(x-1)%WORLD_WIDTH, (y-1)%WORLD_HEIGHT] + cells[x, (y-1)%WORLD_HEIGHT] + cells[(x+1)%WORLD_WIDTH, (y-1)%WORLD_HEIGHT]
                + cells[(x-1)%WORLD_WIDTH, y] + cells[(x+1)%WORLD_WIDTH, y]
                + cells[(x-1)%WORLD_WIDTH, (y+1)%WORLD_HEIGHT] + cells[x, (y+1)%WORLD_HEIGHT] + cells[(x+1)%WORLD_WIDTH, (y+1)%WORLD_HEIGHT])
if cells[x, y] == ON:
if (neighbors < 2) or (neighbors > 3):
new_cells[x, y] = OFF
else:
if neighbors == 3:
new_cells[x, y] = ON
cells[:] = new_cells[:]
GENERATION += 1 | def update(cells):
"""Updates the cell state using conway's rules."""
global GENERATION
new_cells = cells.copy()
for x in range(WORLD_WIDTH):
for y in range(WORLD_HEIGHT):
# Sums the total of neighbors that are ON
            neighbors = int(cells[(x-1)%WORLD_WIDTH, (y-1)%WORLD_HEIGHT] + cells[x, (y-1)%WORLD_HEIGHT] + cells[(x+1)%WORLD_WIDTH, (y-1)%WORLD_HEIGHT]
                + cells[(x-1)%WORLD_WIDTH, y] + cells[(x+1)%WORLD_WIDTH, y]
                + cells[(x-1)%WORLD_WIDTH, (y+1)%WORLD_HEIGHT] + cells[x, (y+1)%WORLD_HEIGHT] + cells[(x+1)%WORLD_WIDTH, (y+1)%WORLD_HEIGHT])
if cells[x, y] == ON:
if (neighbors < 2) or (neighbors > 3):
new_cells[x, y] = OFF
else:
if neighbors == 3:
new_cells[x, y] = ON
cells[:] = new_cells[:]
GENERATION += 1 |
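For reference, the same toroidal neighbour count can be written without the explicit double loop; the numpy sketch below reproduces Conway's rules with np.roll and matches the update above when the world wraps in both axes (ON/OFF assumed to be 1/0).
import numpy as np

def update_vectorised(cells):
    # Sum the eight wrapped neighbours of every cell at once.
    neighbors = sum(
        np.roll(np.roll(cells, dx, axis=0), dy, axis=1)
        for dx in (-1, 0, 1) for dy in (-1, 0, 1)
        if (dx, dy) != (0, 0)
    )
    born = (cells == 0) & (neighbors == 3)
    survive = (cells == 1) & ((neighbors == 2) | (neighbors == 3))
    return np.where(born | survive, 1, 0)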
Python | def div(self, field, u, scheme):
    "Calculate the divergence in a cell as the sum of fluxes over all faces (Gauss' law)"
if scheme == "upwind":
fieldFacex, fieldFacez = self.interpolateUpwind(field, u)
else:
fieldFacex, fieldFacez = self.interpolateLinear(field)
uxFacex, uxFacez = self.interpolateLinear(u[:,:,0])
uzFacex, uzFacez = self.interpolateLinear(u[:,:,1])
uFacex = u.copy()
uFacex[:,:,0] = uxFacex
uFacex[:,:,1] = uzFacex
uFacez = u.copy()
uFacez[:,:,0] = uxFacez
uFacez[:,:,1] = uzFacez
fluxx = fieldFacex*dot(uFacex, self.mesh.xSf)/self.mesh.cellVolume
if not self.mesh.xPeriodic:
fluxx[:,0] *= 0.
fluxz = fieldFacez*dot(uFacez, self.mesh.zSf)/self.mesh.cellVolume
if not self.mesh.zPeriodic:
fluxz[-1,:] *= 0.
divergence = self.mesh.volScalarField.copy()
divergence += np.roll(fluxx, -1, axis=1) - fluxx
divergence += np.roll(fluxz, 1, axis=0) - fluxz
    return divergence | def div(self, field, u, scheme):
    "Calculate the divergence in a cell as the sum of fluxes over all faces (Gauss' law)"
if scheme == "upwind":
fieldFacex, fieldFacez = self.interpolateUpwind(field, u)
else:
fieldFacex, fieldFacez = self.interpolateLinear(field)
uxFacex, uxFacez = self.interpolateLinear(u[:,:,0])
uzFacex, uzFacez = self.interpolateLinear(u[:,:,1])
uFacex = u.copy()
uFacex[:,:,0] = uxFacex
uFacex[:,:,1] = uzFacex
uFacez = u.copy()
uFacez[:,:,0] = uxFacez
uFacez[:,:,1] = uzFacez
fluxx = fieldFacex*dot(uFacex, self.mesh.xSf)/self.mesh.cellVolume
if not self.mesh.xPeriodic:
fluxx[:,0] *= 0.
fluxz = fieldFacez*dot(uFacez, self.mesh.zSf)/self.mesh.cellVolume
if not self.mesh.zPeriodic:
fluxz[-1,:] *= 0.
divergence = self.mesh.volScalarField.copy()
divergence += np.roll(fluxx, -1, axis=1) - fluxx
divergence += np.roll(fluxz, 1, axis=0) - fluxz
return divergence |
Python | def plotFields(id="default"):
'''
Generate plots for the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
    NOTE: calculateFields must be run before using plotFields
Args
id: Data id used for the input data filename and the output files
'''
print(f"\nPlotting fields for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
    # Load the previously saved fields
data = np.load(os.path.join(folder.outputs, "fieldData.npz"))
slices = {}
slices["XZ"] = {
"xAxis": "x",
"yAxis": "z",
"xIndex": 0,
"yIndex": 2,
"location": "y = {:.2f} cm".format(data["axisY"][int(data["resolution"]/2)]),
"slice": np.s_[:, int(data["resolution"]/2), :]
}
slices["YZ"] = {
"xAxis": "y",
"yAxis": "z",
"xIndex": 1,
"yIndex": 2,
"location": "x = {:.2f} cm".format(data["axisX"][int(data["resolution"]/2)]),
"slice": np.s_[:, :, int(data["resolution"]/2)]
}
slices["XY"] = {
"xAxis": "x",
"yAxis": "y",
"xIndex": 0,
"yIndex": 1,
"location": "z = {:.2f} cm".format(data["axisZ"][int(data["resolution"]/2)]),
"slice": np.s_[int(data["resolution"]/2), :, :]
}
for slice in slices:
print("Plotting data for slice {}".format(slice))
s = slices[slice]
plotContour(
data["axis"+s["xAxis"].upper()],
data["axis"+s["yAxis"].upper()],
data["radPressureMag"][s["slice"]],
id=id+"_radPressure_"+slice.lower(),
folder=folder.outputs,
xlabel="{} (cm)".format(s["xAxis"]),
ylabel="{} (cm)".format(s["yAxis"]),
ylabelCBar="Relative radiation pressure",
title="Radiation pressure field, {}".format(s["location"]),
u = data["radPressure"][s["slice"]][...,s["xIndex"]],
v = data["radPressure"][s["slice"]][...,s["yIndex"]],
vectorScale = 10
)
plotContour(
data["axis"+s["xAxis"].upper()],
data["axis"+s["yAxis"].upper()],
data["aMag"][s["slice"]],
id=id+"_a_"+slice.lower(),
folder=folder.outputs,
cmap="jet_r",
xlabel="{} (cm)".format(s["xAxis"]),
ylabel="{} (cm)".format(s["yAxis"]),
ylabelCBar="Relative acceleration",
title="Acceleration field, {}".format(s["location"]),
u = data["a"][s["slice"]][...,s["xIndex"]],
v = data["a"][s["slice"]][...,s["yIndex"]],
vectorScale = 2e5
) | def plotFields(id="default"):
'''
Generate plots for the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
    NOTE: calculateFields must be run before using plotFields
Args
id: Data id used for the input data filename and the output files
'''
print(f"\nPlotting fields for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
    # Load the previously saved fields
data = np.load(os.path.join(folder.outputs, "fieldData.npz"))
slices = {}
slices["XZ"] = {
"xAxis": "x",
"yAxis": "z",
"xIndex": 0,
"yIndex": 2,
"location": "y = {:.2f} cm".format(data["axisY"][int(data["resolution"]/2)]),
"slice": np.s_[:, int(data["resolution"]/2), :]
}
slices["YZ"] = {
"xAxis": "y",
"yAxis": "z",
"xIndex": 1,
"yIndex": 2,
"location": "x = {:.2f} cm".format(data["axisX"][int(data["resolution"]/2)]),
"slice": np.s_[:, :, int(data["resolution"]/2)]
}
slices["XY"] = {
"xAxis": "x",
"yAxis": "y",
"xIndex": 0,
"yIndex": 1,
"location": "z = {:.2f} cm".format(data["axisZ"][int(data["resolution"]/2)]),
"slice": np.s_[int(data["resolution"]/2), :, :]
}
for slice in slices:
print("Plotting data for slice {}".format(slice))
s = slices[slice]
plotContour(
data["axis"+s["xAxis"].upper()],
data["axis"+s["yAxis"].upper()],
data["radPressureMag"][s["slice"]],
id=id+"_radPressure_"+slice.lower(),
folder=folder.outputs,
xlabel="{} (cm)".format(s["xAxis"]),
ylabel="{} (cm)".format(s["yAxis"]),
ylabelCBar="Relative radiation pressure",
title="Radiation pressure field, {}".format(s["location"]),
u = data["radPressure"][s["slice"]][...,s["xIndex"]],
v = data["radPressure"][s["slice"]][...,s["yIndex"]],
vectorScale = 10
)
plotContour(
data["axis"+s["xAxis"].upper()],
data["axis"+s["yAxis"].upper()],
data["aMag"][s["slice"]],
id=id+"_a_"+slice.lower(),
folder=folder.outputs,
cmap="jet_r",
xlabel="{} (cm)".format(s["xAxis"]),
ylabel="{} (cm)".format(s["yAxis"]),
ylabelCBar="Relative acceleration",
title="Acceleration field, {}".format(s["location"]),
u = data["a"][s["slice"]][...,s["xIndex"]],
v = data["a"][s["slice"]][...,s["yIndex"]],
vectorScale = 2e5
) |
Python | def simulateAtoms(id="default"):
'''
    Simulate the trajectories of neutral atoms in the acceleration field of a
    Magneto-Optical Trap with one laser pointed down on a horizontal surface where
    a reflection grating is located, and plot the resulting 3D trajectories.
    NOTE: calculateFields must be run before using simulateAtoms
Args
id: Data id used for the input data filename and the output files
'''
    print(f"\nSimulating atoms for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
    # Load the previously saved fields
data = np.load(os.path.join(folder.outputs, "fieldData.npz"))
x = data["x"]
y = data["y"]
z = data["z"]
a = data["a"]
aMag = data["aMag"]
axisX = data["axisX"]
axisY = data["axisY"]
axisZ = data["axisZ"]
coords = np.stack((x,y,z), axis=-1)
# Generate particles
particles = []
nParticles = 50
for i in range(nParticles):
particles.append(
particle(
[
0.5*random.uniform(data["axisX"][0], data["axisX"][-1]),
0.5*random.uniform(data["axisY"][0], data["axisY"][-1]),
random.uniform(data["axisZ"][0], data["axisZ"][-1])
],
[0,0,0],
dragCoefficient = 10.
)
)
timestep = 5e-4
timesteps = 500
for i in range(nParticles):
print("Simulating trajectory of particle {}".format(i+1))
for n in range(timesteps):
indexX = np.argmin(np.abs(axisX-particles[i].position[0]))
indexY = np.argmin(np.abs(axisY-particles[i].position[1]))
indexZ = np.argmin(np.abs(axisZ-particles[i].position[2]))
acceleration = a[indexZ][indexY][indexX]
particles[i].move(acceleration, timestep)
# Generate 3D trajectory plots
fig = plt.figure()
ax = plt.axes(projection='3d')
# Draw grating chip
xGrating = [-1.,-1.,1.,1.]
yGrating = [-1.,1.,1.,-1.]
zGrating = [0.,0.,0.,0.]
verts = [list(zip(xGrating,yGrating,zGrating))]
poly = Poly3DCollection(verts, facecolors=[[0.5,0.5,0.5,1.]])
poly.set_sort_zpos(50)
ax.add_collection3d(poly)
# Draw laser radius
theta = np.linspace(0, 2 * np.pi, 100)
radius = 1.2
xLaser = radius * np.cos(theta)
yLaser = radius * np.sin(theta)
zLaser = np.zeros_like(xLaser)+0.01
verts = [list(zip(xLaser,yLaser,zLaser))]
poly = Poly3DCollection(verts, facecolors=[[1.,0.,0.,0.5]])
poly.set_sort_zpos(100)
ax.add_collection3d(poly)
# Highlight region with weakest force-field (where particles should converge to)
# cutOff = np.min(aMag) + 0.01*(np.mean(aMag)-np.min(aMag))
# condition = aMag < cutOff
# ax.plot(x[condition], y[condition], z[condition], ".", color="#888888")
# Draw trajectories
for i in range(nParticles):
pos = np.array(particles[i].positions)
zorder = 10000
# Deal with particles that have gone through the ground
if np.min(pos[:,2]) < 0.:
pos[:,2] = np.maximum(pos[:,2], 0*pos[:,2])
ax.plot(pos[...,0], pos[...,1], pos[...,2], zorder=zorder)
ax.plot(pos[...,0][-1], pos[...,1][-1], pos[...,2][-1], "ko", zorder=zorder)
ax.set_xlim([axisX[0], axisX[-1]])
ax.set_ylim([axisY[0], axisY[-1]])
ax.set_zlim([axisZ[0], axisZ[-1]])
ax.set_xlabel("x (cm)")
ax.set_ylabel("y (cm)")
ax.set_zlabel("z (cm)")
# Save images from different perspectives
for i in np.linspace(0,1,25):
elevation = 90*i
angle = 90+360*i
ax.view_init(elev=elevation, azim=angle)
plt.savefig(os.path.join(folder.outputs, "trajectories_{}_{}.png".format(id, int(360*i))), dpi=200)
plt.close() | def simulateAtoms(id="default"):
'''
    Simulate the trajectories of neutral atoms in the acceleration field of a
    Magneto-Optical Trap with one laser pointed down on a horizontal surface where
    a reflection grating is located, and plot the resulting 3D trajectories.
    NOTE: calculateFields must be run before using simulateAtoms
Args
id: Data id used for the input data filename and the output files
'''
    print(f"\nSimulating atoms for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
    # Load the previously saved fields
data = np.load(os.path.join(folder.outputs, "fieldData.npz"))
x = data["x"]
y = data["y"]
z = data["z"]
a = data["a"]
aMag = data["aMag"]
axisX = data["axisX"]
axisY = data["axisY"]
axisZ = data["axisZ"]
coords = np.stack((x,y,z), axis=-1)
# Generate particles
particles = []
nParticles = 50
for i in range(nParticles):
particles.append(
particle(
[
0.5*random.uniform(data["axisX"][0], data["axisX"][-1]),
0.5*random.uniform(data["axisY"][0], data["axisY"][-1]),
random.uniform(data["axisZ"][0], data["axisZ"][-1])
],
[0,0,0],
dragCoefficient = 10.
)
)
timestep = 5e-4
timesteps = 500
for i in range(nParticles):
print("Simulating trajectory of particle {}".format(i+1))
for n in range(timesteps):
indexX = np.argmin(np.abs(axisX-particles[i].position[0]))
indexY = np.argmin(np.abs(axisY-particles[i].position[1]))
indexZ = np.argmin(np.abs(axisZ-particles[i].position[2]))
acceleration = a[indexZ][indexY][indexX]
particles[i].move(acceleration, timestep)
# Generate 3D trajectory plots
fig = plt.figure()
ax = plt.axes(projection='3d')
# Draw grating chip
xGrating = [-1.,-1.,1.,1.]
yGrating = [-1.,1.,1.,-1.]
zGrating = [0.,0.,0.,0.]
verts = [list(zip(xGrating,yGrating,zGrating))]
poly = Poly3DCollection(verts, facecolors=[[0.5,0.5,0.5,1.]])
poly.set_sort_zpos(50)
ax.add_collection3d(poly)
# Draw laser radius
theta = np.linspace(0, 2 * np.pi, 100)
radius = 1.2
xLaser = radius * np.cos(theta)
yLaser = radius * np.sin(theta)
zLaser = np.zeros_like(xLaser)+0.01
verts = [list(zip(xLaser,yLaser,zLaser))]
poly = Poly3DCollection(verts, facecolors=[[1.,0.,0.,0.5]])
poly.set_sort_zpos(100)
ax.add_collection3d(poly)
# Highlight region with weakest force-field (where particles should converge to)
# cutOff = np.min(aMag) + 0.01*(np.mean(aMag)-np.min(aMag))
# condition = aMag < cutOff
# ax.plot(x[condition], y[condition], z[condition], ".", color="#888888")
# Draw trajectories
for i in range(nParticles):
pos = np.array(particles[i].positions)
zorder = 10000
# Deal with particles that have gone through the ground
if np.min(pos[:,2]) < 0.:
pos[:,2] = np.maximum(pos[:,2], 0*pos[:,2])
ax.plot(pos[...,0], pos[...,1], pos[...,2], zorder=zorder)
ax.plot(pos[...,0][-1], pos[...,1][-1], pos[...,2][-1], "ko", zorder=zorder)
ax.set_xlim([axisX[0], axisX[-1]])
ax.set_ylim([axisY[0], axisY[-1]])
ax.set_zlim([axisZ[0], axisZ[-1]])
ax.set_xlabel("x (cm)")
ax.set_ylabel("y (cm)")
ax.set_zlabel("z (cm)")
# Save images from different perspectives
for i in np.linspace(0,1,25):
elevation = 90*i
angle = 90+360*i
ax.view_init(elev=elevation, azim=angle)
plt.savefig(os.path.join(folder.outputs, "trajectories_{}_{}.png".format(id, int(360*i))), dpi=200)
plt.close() |
Python | def main(
id = "default",
gratingType = "triangle",
beamRadius = 1.2,
gaussian = True,
imperfection = False,
resolution = 100,
rangeX = [-0.55, 0.55],
rangeY = [-0.55, 0.55],
rangeZ = [0., 1.1],
precisionCoords = 4,
precisionData = 2
):
'''
Generate the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
Args
id: Data id used for the naming of output files
gratingType: Shape of grating etches/grooves. Valid parameters are "triangle" and "square"
beamRadius: The incident laser beam radius in cm.
gaussian: Is the beam profile Gaussian or uniform? Boolean only.
imperfection: If True, some of the laser beam will be diffracted to 0th order (reflection)
resolution: Resolution of the data in all 3 axes. resolution x 2 = computation x 8.
rangeX: Range of x values to be evaluated in cm.
        rangeY: Range of y values to be evaluated in cm.
        rangeZ: Range of z values to be evaluated in cm.
        precisionCoords: Precision of coordinate data when written to output file.
        precisionData: Precision of field data when written to output file.
'''
    print(f"\nProcessing {id}")
# Calculate the acceleration and radiation pressure, save to file in root/outputs/id/
calculateFields(
id = id,
gratingType = gratingType,
beamRadius = beamRadius,
gaussian = gaussian,
imperfection = imperfection,
resolution = resolution,
rangeX = rangeX,
rangeY = rangeY,
rangeZ = rangeZ,
precisionCoords = precisionCoords,
precisionData = precisionData
)
# Plot fields and save to root/outputs/id/
plotFields(id=id) | def main(
id = "default",
gratingType = "triangle",
beamRadius = 1.2,
gaussian = True,
imperfection = False,
resolution = 100,
rangeX = [-0.55, 0.55],
rangeY = [-0.55, 0.55],
rangeZ = [0., 1.1],
precisionCoords = 4,
precisionData = 2
):
'''
Generate the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
Args
id: Data id used for the naming of output files
gratingType: Shape of grating etches/grooves. Valid parameters are "triangle" and "square"
beamRadius: The incident laser beam radius in cm.
gaussian: Is the beam profile Gaussian or uniform? Boolean only.
imperfection: If True, some of the laser beam will be diffracted to 0th order (reflection)
resolution: Resolution of the data in all 3 axes. resolution x 2 = computation x 8.
rangeX: Range of x values to be evaluated in cm.
        rangeY: Range of y values to be evaluated in cm.
        rangeZ: Range of z values to be evaluated in cm.
        precisionCoords: Precision of coordinate data when written to output file.
        precisionData: Precision of field data when written to output file.
'''
    print(f"\nProcessing {id}")
# Calculate the acceleration and radiation pressure, save to file in root/outputs/id/
calculateFields(
id = id,
gratingType = gratingType,
beamRadius = beamRadius,
gaussian = gaussian,
imperfection = imperfection,
resolution = resolution,
rangeX = rangeX,
rangeY = rangeY,
rangeZ = rangeZ,
precisionCoords = precisionCoords,
precisionData = precisionData
)
# Plot fields and save to root/outputs/id/
plotFields(id=id) |
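A minimal invocation sketch for main as documented above; the id and parameter values are illustrative only, and a reduced resolution keeps the test run cheap.
main(
    id="triangle_gaussian_test",     # illustrative run id
    gratingType="triangle",
    beamRadius=1.2,
    gaussian=True,
    imperfection=False,
    resolution=50,                   # lower than the default for a quick check
)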
Python | def drag(self):
'''
Calculate the drag of the particle with respect to its frame of reference
'''
return -self.dragCoefficient * np.sqrt(np.dot(self.velocity,self.velocity)) * self.velocity | def drag(self):
'''
Calculate the drag of the particle with respect to its frame of reference
'''
return -self.dragCoefficient * np.sqrt(np.dot(self.velocity,self.velocity)) * self.velocity |
Python | def move(self, a, dt):
'''
Move the particle given an acceleration (a) and a timestep (dt)
'''
self.velocity = self.velocity + dt*a + dt*self.drag()
self.position = self.position + dt*self.velocity
self.addHistory() | def move(self, a, dt):
'''
Move the particle given an acceleration (a) and a timestep (dt)
'''
self.velocity = self.velocity + dt*a + dt*self.drag()
self.position = self.position + dt*self.velocity
self.addHistory() |
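A small integration sketch using drag and move above, assuming the particle constructor signature particle(position, velocity, dragCoefficient=...) seen in simulateAtoms; the constant downward acceleration is an illustrative value.
import numpy as np

p = particle([0.0, 0.0, 1.0], [0.0, 0.0, 0.0], dragCoefficient=10.0)
g = np.array([0.0, 0.0, -981.0])   # cm/s^2, illustrative constant acceleration
dt = 5e-4
for _ in range(1000):
    p.move(g, dt)
print("final position:", p.position)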
Python | def intersectSurface(self, vectorOrigin, vectorDirection, intensity):
'''
Determine whether an incident vector intersects this surface.
vectorOrigin and vectorDirection must be of the 2D form:
    [[a1,a2,a3],[b1,b2,b3],[c1,c2,c3],...,[h1,h2,h3]]
'''
if vectorOrigin.ndim == 1:
raise ValueError("Input vectors must be 2D")
indices = np.arange(len(vectorOrigin))
intersectionPoints, conditionIntersect = self.intersect(vectorOrigin, vectorDirection)
indices = indices[conditionIntersect]
intersectionPoints = intersectionPoints[conditionIntersect]
if len(indices) > 0:
#Calculate new direction once reflected
translationFactor = self.dot(vectorOrigin[conditionIntersect]-2*intersectionPoints, self.normalVector)/self.mag(self.normalVector)
vectorDirectionNew = vectorDirection[conditionIntersect] + 2*self.normalVector[None,:]*np.sign(self.dot(-self.normalVector, vectorDirection[conditionIntersect]))[:,None]
vectorOrigin[indices] = intersectionPoints
vectorDirection[indices] = vectorDirectionNew
intensity[indices] *= self.reflectivity
return vectorOrigin, vectorDirection, intensity | def intersectSurface(self, vectorOrigin, vectorDirection, intensity):
'''
Determine whether an incident vector intersects this surface.
vectorOrigin and vectorDirection must be of the 2D form:
    [[a1,a2,a3],[b1,b2,b3],[c1,c2,c3],...,[h1,h2,h3]]
'''
if vectorOrigin.ndim == 1:
raise ValueError("Input vectors must be 2D")
indices = np.arange(len(vectorOrigin))
intersectionPoints, conditionIntersect = self.intersect(vectorOrigin, vectorDirection)
indices = indices[conditionIntersect]
intersectionPoints = intersectionPoints[conditionIntersect]
if len(indices) > 0:
#Calculate new direction once reflected
translationFactor = self.dot(vectorOrigin[conditionIntersect]-2*intersectionPoints, self.normalVector)/self.mag(self.normalVector)
vectorDirectionNew = vectorDirection[conditionIntersect] + 2*self.normalVector[None,:]*np.sign(self.dot(-self.normalVector, vectorDirection[conditionIntersect]))[:,None]
vectorOrigin[indices] = intersectionPoints
vectorDirection[indices] = vectorDirectionNew
intensity[indices] *= self.reflectivity
return vectorOrigin, vectorDirection, intensity |
Python | def intersectionPointOnPlane(self, vectorOrigin, vectorDirection):
'''
This surface is defined on a 2D plane. For a given vector, see where the intersection point
is with the plane.
This can be used to determine whether the intersection point is on the actual surface.
'''
    # Remove points which are parallel to the plane and won't intersect
conditionParallel = self.dot(vectorDirection, self.normalVector) == 0.
vectorOrigin[conditionParallel] *= np.nan
vectorDirection[conditionParallel] *= np.nan
translationFactor = self.dot(self.vertices[0]-vectorOrigin, self.normalVector)/self.dot(vectorDirection, self.normalVector)
return vectorOrigin + translationFactor[...,None]*vectorDirection, np.invert(conditionParallel) | def intersectionPointOnPlane(self, vectorOrigin, vectorDirection):
'''
This surface is defined on a 2D plane. For a given vector, see where the intersection point
is with the plane.
This can be used to determine whether the intersection point is on the actual surface.
'''
    # Remove points which are parallel to the plane and won't intersect
conditionParallel = self.dot(vectorDirection, self.normalVector) == 0.
vectorOrigin[conditionParallel] *= np.nan
vectorDirection[conditionParallel] *= np.nan
translationFactor = self.dot(self.vertices[0]-vectorOrigin, self.normalVector)/self.dot(vectorDirection, self.normalVector)
return vectorOrigin + translationFactor[...,None]*vectorDirection, np.invert(conditionParallel) |
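The plane-intersection step above applies the standard ray-plane formula t = ((p0 - o) . n) / (d . n); the standalone numpy sketch below evaluates it for a single ray, independently of the surface class and its vectorised bookkeeping.
import numpy as np

def ray_plane_intersection(origin, direction, plane_point, plane_normal):
    denom = np.dot(direction, plane_normal)
    if np.isclose(denom, 0.0):
        return None   # the ray is parallel to the plane
    t = np.dot(plane_point - origin, plane_normal) / denom
    return origin + t * direction

hit = ray_plane_intersection(
    origin=np.array([0.0, 0.0, 1.0]),
    direction=np.array([0.0, 0.0, -1.0]),
    plane_point=np.array([0.0, 0.0, 0.0]),
    plane_normal=np.array([0.0, 0.0, 1.0]),
)
print(hit)   # -> [0. 0. 0.]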
Python | def calculateFields(
id = "default",
gratingType = "triangle",
beamRadius = 1.2,
gaussian = True,
imperfection = False,
resolution = 100,
rangeX = [-0.55, 0.55],
rangeY = [-0.55, 0.55],
rangeZ = [0., 1.1],
precisionCoords = 4,
precisionData = 2
):
'''
Generate the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
Args
id: Data id used for the naming of output files
gratingType: Shape of grating etches/grooves. Valid parameters are "triangle" and "square"
beamRadius: The incident laser beam radius in cm.
gaussian: Is the beam profile Gaussian or uniform? Boolean only.
imperfection: If True, some of the laser beam will be diffracted to 0th order (reflection)
resolution: Resolution of the data in all 3 axes. resolution x 2 = computation x 8.
rangeX: Range of x values to be evaluated in cm.
        rangeY: Range of y values to be evaluated in cm.
        rangeZ: Range of z values to be evaluated in cm.
        precisionCoords: Precision of coordinate data when written to output file.
        precisionData: Precision of field data when written to output file.
'''
print(f"\nGenerating fields for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
# Get grating geometry and properties
if gratingType == "square":
gratings = makeSquareGrating()
elif gratingType == "triangle":
gratings = makeTriangleGrating()
else:
gratings = makeSquareGrating()
# 3D Coordinate and Acceleration vector fields.
axisX = np.linspace(rangeX[0], rangeX[1], resolution)
axisY = np.linspace(rangeY[0], rangeY[1], resolution)
axisZ = np.linspace(rangeZ[0], rangeZ[1], resolution)
z, y, x = np.meshgrid(axisZ, axisY, axisX, indexing='ij')
coords = np.stack((x,y,z), axis=-1)
# Data arrays
a = np.zeros(coords.shape)
radPressure = np.zeros(coords.shape)
waveVector = np.zeros(coords.shape)
print("Generating fields from incident beam")
# Compute acceleration or radiation pressure from incident laser beam.
intensity = np.ones(x.shape)
if gaussian == True:
intensity *= np.e**(-(x**2 + y**2)/2.)
k = np.array([0., 0., -1.])
waveVector[...] = k
a += acceleration(waveVector, coords, I=intensity)
radPressure[...] += intensity[...,None]*waveVector
# Ignore regions not within the beam radius
conditionBeam = x**2 + y**2 <= beamRadius**2
a *= conditionBeam[...,None]
radPressure *= conditionBeam[...,None]
# Compute acceleration or radiation pressure from 0th order beam.
if imperfection == True:
print("Generating fields from 0th-order diffracted beam")
intensity = 0.01
if gaussian == True:
intensity *= np.exp(-(0.5*x)**2)
k = np.array([0., 0., 1.])
waveVector[...] = k
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
radPressure[...] += intensity[...,None]*waveVector
switch = True
for grating in gratings:
print("Generating fields from grating segment")
if switch == True:
switch = False
factor = -1
else:
factor = 1
# Compute acceleration or radiation pressure from kth grating 1st order beam.
intensity = grating.reflectivity * grating.intensity(
coords,
factor,
beamRadius,
gaussian=gaussian
)
# Unit vector of diffracted beam
waveVector[...] = grating.k
radPressure += waveVector * intensity[...,None]
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
# Compute acceleration or radiation pressure from kth grating 1st order beam in other direction.
intensity = grating.reflectivity * grating.intensity(
coords,
-factor,
beamRadius,
gaussian=gaussian
)
# Unit vector of diffracted beam
waveVector[...] = grating.k
radPressure += waveVector * intensity[...,None]
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
# Store the acceleration magnitude and the components of the acceleration.
aMag = np.sqrt(np.sum(a*a, axis=(a.ndim-1)))
radPressureMag = np.sqrt(np.sum(radPressure*radPressure, axis=(radPressure.ndim-1)))
# Save fields for future use
np.savez_compressed(
os.path.join(folder.outputs, "fieldData.npz"),
id = id,
gratingType = gratingType,
beamRadius = beamRadius,
gaussian = gaussian,
imperfection = imperfection,
resolution = resolution,
rangeX = rangeX,
rangeY = rangeY,
rangeZ = rangeZ,
axisX = axisX,
axisY = axisY,
axisZ = axisZ,
x = x.round(decimals=precisionCoords),
y = y.round(decimals=precisionCoords),
z = z.round(decimals=precisionCoords),
a = a.round(decimals=precisionData),
aMag = aMag.round(decimals=precisionData),
radPressure = radPressure.round(decimals=precisionData),
radPressureMag = radPressureMag.round(decimals=precisionData)
) | def calculateFields(
id = "default",
gratingType = "triangle",
beamRadius = 1.2,
gaussian = True,
imperfection = False,
resolution = 100,
rangeX = [-0.55, 0.55],
rangeY = [-0.55, 0.55],
rangeZ = [0., 1.1],
precisionCoords = 4,
precisionData = 2
):
'''
Generate the acceleration and radiation profiles for a neutral atom in a
Magneto-Optical Trap with one laser pointed down on a horizontal surface where
a reflection grating is located.
Args
id: Data id used for the naming of output files
gratingType: Shape of grating etches/grooves. Valid parameters are "triangle" and "square"
beamRadius: The incident laser beam radius in cm.
gaussian: Is the beam profile Gaussian or uniform? Boolean only.
imperfection: If True, some of the laser beam will be diffracted to 0th order (reflection)
resolution: Resolution of the data in all 3 axes. resolution x 2 = computation x 8.
rangeX: Range of x values to be evaluated in cm.
        rangeY: Range of y values to be evaluated in cm.
        rangeZ: Range of z values to be evaluated in cm.
        precisionCoords: Precision of coordinate data when written to output file.
        precisionData: Precision of field data when written to output file.
'''
print(f"\nGenerating fields for configuration: {id}")
folder = folders(
id = id,
folderScripts = os.path.dirname(os.path.realpath(__file__))
)
# Get grating geometry and properties
if gratingType == "square":
gratings = makeSquareGrating()
elif gratingType == "triangle":
gratings = makeTriangleGrating()
else:
gratings = makeSquareGrating()
# 3D Coordinate and Acceleration vector fields.
axisX = np.linspace(rangeX[0], rangeX[1], resolution)
axisY = np.linspace(rangeY[0], rangeY[1], resolution)
axisZ = np.linspace(rangeZ[0], rangeZ[1], resolution)
z, y, x = np.meshgrid(axisZ, axisY, axisX, indexing='ij')
coords = np.stack((x,y,z), axis=-1)
# Data arrays
a = np.zeros(coords.shape)
radPressure = np.zeros(coords.shape)
waveVector = np.zeros(coords.shape)
print("Generating fields from incident beam")
# Compute acceleration or radiation pressure from incident laser beam.
intensity = np.ones(x.shape)
if gaussian == True:
intensity *= np.e**(-(x**2 + y**2)/2.)
k = np.array([0., 0., -1.])
waveVector[...] = k
a += acceleration(waveVector, coords, I=intensity)
radPressure[...] += intensity[...,None]*waveVector
# Ignore regions not within the beam radius
conditionBeam = x**2 + y**2 <= beamRadius**2
a *= conditionBeam[...,None]
radPressure *= conditionBeam[...,None]
# Compute acceleration or radiation pressure from 0th order beam.
if imperfection == True:
print("Generating fields from 0th-order diffracted beam")
intensity = 0.01
if gaussian == True:
intensity *= np.exp(-(0.5*x)**2)
k = np.array([0., 0., 1.])
waveVector[...] = k
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
radPressure[...] += intensity[...,None]*waveVector
switch = True
for grating in gratings:
print("Generating fields from grating segment")
if switch == True:
switch = False
factor = -1
else:
factor = 1
# Compute acceleration or radiation pressure from kth grating 1st order beam.
intensity = grating.reflectivity * grating.intensity(
coords,
factor,
beamRadius,
gaussian=gaussian
)
# Unit vector of diffracted beam
waveVector[...] = grating.k
radPressure += waveVector * intensity[...,None]
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
# Compute acceleration or radiation pressure from kth grating 1st order beam in other direction.
intensity = grating.reflectivity * grating.intensity(
coords,
-factor,
beamRadius,
gaussian=gaussian
)
# Unit vector of diffracted beam
waveVector[...] = grating.k
radPressure += waveVector * intensity[...,None]
a += acceleration(waveVector, coords, I=intensity, factor=-1.)
# Store the acceleration magnitude and the components of the acceleration.
aMag = np.sqrt(np.sum(a*a, axis=(a.ndim-1)))
radPressureMag = np.sqrt(np.sum(radPressure*radPressure, axis=(radPressure.ndim-1)))
# Save fields for future use
np.savez_compressed(
os.path.join(folder.outputs, "fieldData.npz"),
id = id,
gratingType = gratingType,
beamRadius = beamRadius,
gaussian = gaussian,
imperfection = imperfection,
resolution = resolution,
rangeX = rangeX,
rangeY = rangeY,
rangeZ = rangeZ,
axisX = axisX,
axisY = axisY,
axisZ = axisZ,
x = x.round(decimals=precisionCoords),
y = y.round(decimals=precisionCoords),
z = z.round(decimals=precisionCoords),
a = a.round(decimals=precisionData),
aMag = aMag.round(decimals=precisionData),
radPressure = radPressure.round(decimals=precisionData),
radPressureMag = radPressureMag.round(decimals=precisionData)
) |
Python | def calculateSaccadeCurvature(xSacc, ySacc, pixPerDegree, ignoreDist = 0.5, flipY = False):
''' Calculates the saccade curvature.\n
Input a list of xSaccade data points as well as a list of ySaccade data points.\n
Also ignores any data points within the start and end range (degrees)\n
Parameters
----------
xSacc: List of lists
        [[],[],[],[],[]], Each list contains xSacc data points
ySacc: List of lists
        [[],[],[],[],[]], Each list contains ySacc data points
pixPerDegree: Float
The number of pixels per visual degree
ignoreDist: Float
All data points which are closer than this value (visual degrees) to either the start or end of saccade are ignored
Returns
----------
curveData: List of lists
Containing the saccade curvature for all points in the saccades
saccAngle: List of Floats
The angle of the saccade
Assumptions
----------
Values:
0 - 180 degrees (clockwise curvature)\n
        0 - -180 degrees (counterclockwise curvature)
X values go from left to right\n
Y values go from top to bottom\n
'''
curveData = deque([])
saccAngles = deque([])
for sacc in range(0,len(xSacc)):
saccX = xSacc[sacc]
# Check if there are enough samples in the saccade to calculate curvature
if len(saccX) < 4:
curveData.append(np.nan)
saccAngles.append(np.nan)
continue
if flipY == True:
saccY = [i*-1 for i in ySacc[sacc]]
else:
saccY = ySacc[sacc]
startPos = (saccX[0], saccY[0])
endPos = (saccX[-1], saccY[-1])
saccadeAngle = determineAngle(startPos,endPos) *-1
saccAngle360 = pointAngle((xSacc[sacc][0], ySacc[sacc][0]), (xSacc[sacc][-1], ySacc[sacc][-1]))
# we calculate point angle for all points except the last point (also exclude first point)
pointAngles = np.zeros(len(saccX)-2)
for pointNr in range(0,len(saccX)-2):
point = (saccX[pointNr+1], saccY[pointNr+1])
startDist = distBetweenPoints(startPos, point) / pixPerDegree
endDist = distBetweenPoints(endPos, point) / pixPerDegree
# check if the sample is far enough away from start and end position
            # Overshoots can be a problem: an overshoot is sometimes further than ignoreDist,
            # which causes the curvature analysis to be started, stopped, then started and stopped again,
            # e.g. [0,0,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0] where 1 has calculated curvature and 0 is too close
if min([startDist, endDist]) < ignoreDist:
pointAngles[pointNr] = 9999
else:
pointCurv = (determineAngle(startPos,point) *-1) - saccadeAngle
if pointCurv > 180:
pointCurv -=360
elif pointCurv < -180:
pointCurv +=360
pointAngles[pointNr] = pointCurv
pointAngles = pointAngles[pointAngles < 9999]
curveData.append(pointAngles)
# Append saccadeAngles
# if saccadeAngle > 180:
# saccadeAngle -=360
# elif saccadeAngle < -180:
# saccadeAngle +=360
saccAngles.append(saccAngle360)
return curveData, saccAngles | def calculateSaccadeCurvature(xSacc, ySacc, pixPerDegree, ignoreDist = 0.5, flipY = False):
''' Calculates the saccade curvature.\n
Input a list of xSaccade data points as well as a list of ySaccade data points.\n
Also ignores any data points within the start and end range (degrees)\n
Parameters
----------
xSacc: List of lists
        [[],[],[],[],[]], Each list contains xSacc data points
ySacc: List of lists
        [[],[],[],[],[]], Each list contains ySacc data points
pixPerDegree: Float
The number of pixels per visual degree
ignoreDist: Float
All data points which are closer than this value (visual degrees) to either the start or end of saccade are ignored
Returns
----------
curveData: List of lists
Containing the saccade curvature for all points in the saccades
saccAngle: List of Floats
The angle of the saccade
Assumptions
----------
Values:
0 - 180 degrees (clockwise curvature)\n
        0 - -180 degrees (counterclockwise curvature)
X values go from left to right\n
Y values go from top to bottom\n
'''
curveData = deque([])
saccAngles = deque([])
for sacc in range(0,len(xSacc)):
saccX = xSacc[sacc]
# Check if there are enough samples in the saccade to calculate curvature
if len(saccX) < 4:
curveData.append(np.nan)
saccAngles.append(np.nan)
continue
if flipY == True:
saccY = [i*-1 for i in ySacc[sacc]]
else:
saccY = ySacc[sacc]
startPos = (saccX[0], saccY[0])
endPos = (saccX[-1], saccY[-1])
saccadeAngle = determineAngle(startPos,endPos) *-1
saccAngle360 = pointAngle((xSacc[sacc][0], ySacc[sacc][0]), (xSacc[sacc][-1], ySacc[sacc][-1]))
# we calculate point angle for all points except the last point (also exclude first point)
pointAngles = np.zeros(len(saccX)-2)
for pointNr in range(0,len(saccX)-2):
point = (saccX[pointNr+1], saccY[pointNr+1])
startDist = distBetweenPoints(startPos, point) / pixPerDegree
endDist = distBetweenPoints(endPos, point) / pixPerDegree
# check if the sample is far enough away from start and end position
            # Overshoots can be a problem: an overshoot is sometimes further than ignoreDist,
            # which causes the curvature analysis to be started, stopped, then started and stopped again,
            # e.g. [0,0,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0] where 1 has calculated curvature and 0 is too close
if min([startDist, endDist]) < ignoreDist:
pointAngles[pointNr] = 9999
else:
pointCurv = (determineAngle(startPos,point) *-1) - saccadeAngle
if pointCurv > 180:
pointCurv -=360
elif pointCurv < -180:
pointCurv +=360
pointAngles[pointNr] = pointCurv
pointAngles = pointAngles[pointAngles < 9999]
curveData.append(pointAngles)
# Append saccadeAngles
# if saccadeAngle > 180:
# saccadeAngle -=360
# elif saccadeAngle < -180:
# saccadeAngle +=360
saccAngles.append(saccAngle360)
return curveData, saccAngles |
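A small usage sketch with one synthetic, slightly bowed saccade; the pixels-per-degree value is illustrative, and the module helpers referenced above (determineAngle, distBetweenPoints, pointAngle) are assumed to be importable alongside the function.
import numpy as np

xSacc = [list(np.linspace(0, 100, 20))]                   # one saccade, 20 samples
ySacc = [list(5 * np.sin(np.linspace(0, np.pi, 20)))]     # small vertical bow
curves, angles = calculateSaccadeCurvature(xSacc, ySacc, pixPerDegree=30.0)
print("median curvature (deg):", np.median(curves[0]))
print("saccade angle (deg):", angles[0])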
Python | def parseToLongFormat(data, duplicate = 'No', eyetracker='Eyelink'):
'''
Turn a parsed datafile into long data file:
Deletes the raw data and only keeps events
'''
data = data.copy()
#==============================================================================
    # Delete all the keys with raw data
#==============================================================================
LargeData = ['saccTraceTime', 'saccTraceX', 'saccTraceY', 'saccTracePup',
'euclidDist', 'rawPupSize', 'rawTime', 'rawX', 'rawY',
'fixTraceTime', 'fixTraceX', 'fixTraceY', 'fixTracePup', 'speed']
if eyetracker == 'Tobii':
add = []
unfilt = ['rawTime', 'GazePointXLeft', 'GazePointYLeft',
'ValidityLeft', 'GazePointXRight', 'GazePointYRight',
'ValidityRight', 'rawX', 'rawY', 'PupilSizeLeft',
'PupilValidityLeft', 'PupilSizeRight' , 'PupilValidityRight',
'rawPupSize', 'pupValidity', 'gazeValidity']
variousKeys = ['saccBool', 'fixBool']
for item in unfilt:
add.append(item+'Unfilt')
for item in variousKeys:
add.append(item)
for item in add:
LargeData.append(item)
LargeData = ['DK_'+i for i in LargeData]
LargeData.append('DV_description')
for key in LargeData:
if key in data.keys():
del data[key]
elif key[3:] in data.keys():
del data[key[3:]]
# Delete all headers with spaces
for key in data.keys():
if len(key.split()) > 1:
del data[key]
# Get the largest number of events for each trial
trialLengths = np.zeros(len(data))
for trial in xrange(len(data)):
for key in data.keys():
try:
if isinstance(data[key][trial], basestring):
keyLen = 1
else:
keyLen = len(data[key][trial])
except:
keyLen = 1
pass
if keyLen > trialLengths[trial]:
trialLengths[trial] = keyLen
    # Iterate through each trial and add an index for the events given
saccNr = []
fixNr = []
for i in range(len(data)):
saccNr.append(np.arange(len(data['DK_ssacc'][i]))+1)
fixNr.append(np.arange(len(data['DK_sFix'][i]))+1)
data['DK_saccNr'] = saccNr
data['DK_fixNr'] = fixNr
# Initiate a long format data frame
dataL = pd.DataFrame(index = xrange(int(np.sum(trialLengths))), columns = data.keys())
    # Iterate through each key and populate the long format data
for key in data.keys():
strtIndex = 0
stopIndex = int(trialLengths[0])
keyVector = np.empty(len(dataL[key]))
keyVector[:] = np.NAN
keyVector = pd.Series(keyVector)
for trial in xrange(len(data)):
try:
dataLen = len(data[key][trial])
if isinstance(data[key][trial], basestring):
if duplicate == 'Yes':
keyVector[strtIndex:stopIndex] = data[key][trial]
else:
keyVector[strtIndex] = data[key][trial]
else:
keyVector[strtIndex:strtIndex+dataLen] = data[key][trial]
except:
if duplicate == 'Yes':
keyVector[strtIndex:stopIndex] = data[key][trial]
else:
keyVector[strtIndex] = data[key][trial]
            # Update the index for the next trial
if trial < len(data)-1:
strtIndex += int(trialLengths[trial])
stopIndex = int(strtIndex + trialLengths[trial+1])
# Store the new vector in the dataframe
dataL[key] = keyVector
return dataL | def parseToLongFormat(data, duplicate = 'No', eyetracker='Eyelink'):
'''
Turn a parsed datafile into long data file:
Deletes the raw data and only keeps events
'''
data = data.copy()
#==============================================================================
    # Delete all the keys with raw data
#==============================================================================
LargeData = ['saccTraceTime', 'saccTraceX', 'saccTraceY', 'saccTracePup',
'euclidDist', 'rawPupSize', 'rawTime', 'rawX', 'rawY',
'fixTraceTime', 'fixTraceX', 'fixTraceY', 'fixTracePup', 'speed']
if eyetracker == 'Tobii':
add = []
unfilt = ['rawTime', 'GazePointXLeft', 'GazePointYLeft',
'ValidityLeft', 'GazePointXRight', 'GazePointYRight',
'ValidityRight', 'rawX', 'rawY', 'PupilSizeLeft',
'PupilValidityLeft', 'PupilSizeRight' , 'PupilValidityRight',
'rawPupSize', 'pupValidity', 'gazeValidity']
variousKeys = ['saccBool', 'fixBool']
for item in unfilt:
add.append(item+'Unfilt')
for item in variousKeys:
add.append(item)
for item in add:
LargeData.append(item)
LargeData = ['DK_'+i for i in LargeData]
LargeData.append('DV_description')
for key in LargeData:
if key in data.keys():
del data[key]
elif key[3:] in data.keys():
del data[key[3:]]
# Delete all headers with spaces
for key in data.keys():
if len(key.split()) > 1:
del data[key]
# Get the largest number of events for each trial
trialLengths = np.zeros(len(data))
for trial in xrange(len(data)):
for key in data.keys():
try:
if isinstance(data[key][trial], basestring):
keyLen = 1
else:
keyLen = len(data[key][trial])
except:
keyLen = 1
pass
if keyLen > trialLengths[trial]:
trialLengths[trial] = keyLen
    # Iterate through each trial and add an index for the events given
saccNr = []
fixNr = []
for i in range(len(data)):
saccNr.append(np.arange(len(data['DK_ssacc'][i]))+1)
fixNr.append(np.arange(len(data['DK_sFix'][i]))+1)
data['DK_saccNr'] = saccNr
data['DK_fixNr'] = fixNr
# Initiate a long format data frame
dataL = pd.DataFrame(index = xrange(int(np.sum(trialLengths))), columns = data.keys())
    # Iterate through each key and populate the long format data
for key in data.keys():
strtIndex = 0
stopIndex = int(trialLengths[0])
keyVector = np.empty(len(dataL[key]))
keyVector[:] = np.NAN
keyVector = pd.Series(keyVector)
for trial in xrange(len(data)):
try:
dataLen = len(data[key][trial])
if isinstance(data[key][trial], basestring):
if duplicate == 'Yes':
keyVector[strtIndex:stopIndex] = data[key][trial]
else:
keyVector[strtIndex] = data[key][trial]
else:
keyVector[strtIndex:strtIndex+dataLen] = data[key][trial]
except:
if duplicate == 'Yes':
keyVector[strtIndex:stopIndex] = data[key][trial]
else:
keyVector[strtIndex] = data[key][trial]
# Update the index for the next data trial indexl
if trial < len(data)-1:
strtIndex += int(trialLengths[trial])
stopIndex = int(strtIndex + trialLengths[trial+1])
# Store the new vector in the dataframe
dataL[key] = keyVector
return dataL |
Python | def is_async_mode():
"""Tests if we're in the async part of the code or not"""
async def f():
"""Unasync transforms async functions in sync functions"""
return None
obj = f()
if obj is None:
return False
else:
obj.close() # prevent unawaited coroutine warning
return True | def is_async_mode():
"""Tests if we're in the async part of the code or not"""
async def f():
"""Unasync transforms async functions in sync functions"""
return None
obj = f()
if obj is None:
return False
else:
obj.close() # prevent unawaited coroutine warning
return True |
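A short usage sketch: when the unasync transform has rewritten f into a plain synchronous function the helper returns False, otherwise it returns True, so callers can branch on the build variant.
if is_async_mode():
    print("running the async variant of the code")
else:
    print("running the unasync (sync) variant of the code")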
Python | def uncertain_smooth_l1_loss(pred, target, sigma, alpha=1.0, beta=1.0):
"""Smooth L1 loss with uncertainty.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
sigma (torch.Tensor): The sigma for uncertainty.
alpha (float, optional): The coefficient of log(sigma).
Defaults to 1.0.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
assert target.numel() > 0
assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \
f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \
'are inconsistent.'
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
loss = torch.exp(-sigma) * loss + alpha * sigma
return loss | def uncertain_smooth_l1_loss(pred, target, sigma, alpha=1.0, beta=1.0):
"""Smooth L1 loss with uncertainty.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
sigma (torch.Tensor): The sigma for uncertainty.
alpha (float, optional): The coefficient of log(sigma).
Defaults to 1.0.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
assert target.numel() > 0
assert pred.size() == target.size() == sigma.size(), 'The size of pred ' \
f'{pred.size()}, target {target.size()}, and sigma {sigma.size()} ' \
'are inconsistent.'
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
loss = torch.exp(-sigma) * loss + alpha * sigma
return loss |
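A minimal usage sketch of the loss above with random tensors; the shapes and the final mean() reduction are illustrative choices rather than part of the original signature.
import torch

pred = torch.randn(8, 7)
target = torch.randn(8, 7)
sigma = torch.zeros(8, 7)   # zero predicted log-uncertainty
loss = uncertain_smooth_l1_loss(pred, target, sigma, alpha=1.0, beta=1.0)
print(loss.shape, loss.mean().item())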
Python | def make_encoder_layers(self,
make_block,
norm_cfg,
in_channels,
block_type='conv_module',
conv_cfg=dict(type='SubMConv3d')):
"""make encoder layers using sparse convs.
Args:
make_block (method): A bounded function to build blocks.
norm_cfg (dict[str]): Config of normalization layer.
in_channels (int): The number of encoder input channels.
block_type (str): Type of the block to use. Defaults to
'conv_module'.
conv_cfg (dict): Config of conv layer. Defaults to
dict(type='SubMConv3d').
Returns:
int: The number of encoder output channels.
"""
assert block_type in ['conv_module', 'basicblock']
self.encoder_layers = spconv.SparseSequential()
for i, blocks in enumerate(self.encoder_channels):
blocks_list = []
for j, out_channels in enumerate(tuple(blocks)):
padding = tuple(self.encoder_paddings[i])[j]
# each stage started with a spconv layer
# except the first stage
if i != 0 and j == 0 and block_type == 'conv_module':
blocks_list.append(
make_block(in_channels,
out_channels,
3,
norm_cfg=norm_cfg,
stride=2,
padding=padding,
indice_key=f'spconv{i + 1}',
conv_type='SparseConv3d'))
elif block_type == 'basicblock':
if j == len(blocks) - 1 and i != len(
self.encoder_channels) - 1:
blocks_list.append(
make_block(in_channels,
out_channels,
3,
norm_cfg=norm_cfg,
stride=2,
padding=padding,
indice_key=f'spconv{i + 1}',
conv_type='SparseConv3d'))
else:
blocks_list.append(
SparseBasicBlock(out_channels,
out_channels,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg))
else:
blocks_list.append(
make_block(in_channels,
out_channels,
3,
norm_cfg=norm_cfg,
padding=padding,
indice_key=f'subm{i + 1}',
conv_type='SubMConv3d'))
if (self.fusion_layer
is not None) and (i in self.fusion_pos) and (
self.fusion_layer.fusion_method == 'concat'
or self.fusion_layer.fusion_method
== 'gating_v1') and (j == len(blocks) - 1):
out_channels = out_channels * 2
in_channels = out_channels
stage_name = f'encoder_layer{i + 1}'
stage_layers = spconv.SparseSequential(*blocks_list)
self.encoder_layers.add_module(stage_name, stage_layers)
        return out_channels
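For reference, a hypothetical shape for the two attributes the method above iterates over: one tuple of output channels and one tuple of paddings per stage. The values below are placeholders; the real ones come from the detector config.
encoder_channels = ((16, ), (32, 32, 32), (64, 64, 64), (64, 64, 64))
encoder_paddings = ((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1, 1))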
Python | def decode(self, bbox, scale, stride, training, cls_score=None):
"""Decode regressed results into 3D predictions.
Note that offsets are not transformed to the projected 3D centers.
Args:
bbox (torch.Tensor): Raw bounding box predictions in shape
[N, C, H, W].
scale (tuple[`Scale`]): Learnable scale parameters.
stride (int): Stride for a specific feature level.
training (bool): Whether the decoding is in the training
procedure.
cls_score (torch.Tensor): Classification score map for deciding
which base depth or dim is used. Defaults to None.
Returns:
torch.Tensor: Decoded boxes.
"""
# scale the bbox of different level
# only apply to offset, depth and size prediction
scale_offset, scale_depth, scale_size = scale[0:3]
clone_bbox = bbox.clone()
bbox[:, :2] = scale_offset(clone_bbox[:, :2]).float()
bbox[:, 2] = scale_depth(clone_bbox[:, 2]).float()
bbox[:, 3:6] = scale_size(clone_bbox[:, 3:6]).float()
if self.base_depths is None:
bbox[:, 2] = bbox[:, 2].exp()
elif len(self.base_depths) == 1: # only single prior
mean = self.base_depths[0][0]
std = self.base_depths[0][1]
bbox[:, 2] = mean + bbox.clone()[:, 2] * std
else: # multi-class priors
assert len(self.base_depths) == cls_score.shape[1], \
'The number of multi-class depth priors should be equal to ' \
'the number of categories.'
indices = cls_score.max(dim=1)[1]
depth_priors = cls_score.new_tensor(
self.base_depths)[indices, :].permute(0, 3, 1, 2)
mean = depth_priors[:, 0]
std = depth_priors[:, 1]
bbox[:, 2] = mean + bbox.clone()[:, 2] * std
bbox[:, 3:6] = bbox[:, 3:6].exp()
if self.base_dims is not None:
assert len(self.base_dims) == cls_score.shape[1], \
'The number of anchor sizes should be equal to the number ' \
'of categories.'
indices = cls_score.max(dim=1)[1]
size_priors = cls_score.new_tensor(
self.base_dims)[indices, :].permute(0, 3, 1, 2)
bbox[:, 3:6] = size_priors * bbox.clone()[:, 3:6]
assert self.norm_on_bbox is True, 'Setting norm_on_bbox to False '\
'has not been thoroughly tested for FCOS3D.'
if self.norm_on_bbox:
if not training:
# Note that this line is conducted only when testing
bbox[:, :2] *= stride
        return bbox
Python | def decode_yaw(bbox, centers2d, dir_cls, dir_offset, cam2img):
"""Decode yaw angle and change it from local to global.i.
Args:
bbox (torch.Tensor): Bounding box predictions in shape
[N, C] with yaws to be decoded.
centers2d (torch.Tensor): Projected 3D-center on the image planes
corresponding to the box predictions.
dir_cls (torch.Tensor): Predicted direction classes.
dir_offset (float): Direction offset before dividing all the
directions into several classes.
cam2img (torch.Tensor): Camera intrinsic matrix in shape [4, 4].
Returns:
torch.Tensor: Bounding boxes with decoded yaws.
"""
if bbox.shape[0] > 0:
dir_rot = limit_period(bbox[..., 6] - dir_offset, 0, np.pi)
bbox[..., 6] = \
dir_rot + dir_offset + np.pi * dir_cls.to(bbox.dtype)
bbox[:, 6] = torch.atan2(centers2d[:, 0] - cam2img[0, 2],
cam2img[0, 0]) + bbox[:, 6]
        return bbox
Python | def array_converter(to_torch=True,
apply_to=tuple(),
template_arg_name_=None,
recover=True):
"""Wrapper function for data-type agnostic processing.
    First converts input arrays to PyTorch tensors or NumPy ndarrays for the
    intermediate calculation, then converts the outputs back to the original
    data type if `recover=True`.
Args:
to_torch (Bool, optional): Whether convert to PyTorch tensors
for middle calculation. Defaults to True.
apply_to (tuple[str], optional): The arguments to which we apply
data-type conversion. Defaults to an empty tuple.
template_arg_name_ (str, optional): Argument serving as the template (
return arrays should have the same dtype and device
as the template). Defaults to None. If None, we will use the
first argument in `apply_to` as the template argument.
        recover (Bool, optional): Whether or not to recover the wrapped
            function outputs to the `template_arg_name_` type. Defaults to True.
Raises:
ValueError: When template_arg_name_ is not among all args, or
when apply_to contains an arg which is not among all args,
a ValueError will be raised. When the template argument or
an argument to convert is a list or tuple, and cannot be
converted to a NumPy array, a ValueError will be raised.
TypeError: When the type of the template argument or
an argument to convert does not belong to the above range,
            or the contents of such a list-or-tuple-type argument
do not share the same data type, a TypeError is raised.
Returns:
(function): wrapped function.
Example:
>>> import torch
>>> import numpy as np
>>>
>>> # Use torch addition for a + b,
>>> # and convert return values to the type of a
>>> @array_converter(apply_to=('a', 'b'))
>>> def simple_add(a, b):
>>> return a + b
>>>
>>> a = np.array([1.1])
>>> b = np.array([2.2])
>>> simple_add(a, b)
>>>
>>> # Use numpy addition for a + b,
>>> # and convert return values to the type of b
>>> @array_converter(to_torch=False, apply_to=('a', 'b'),
>>> template_arg_name_='b')
>>> def simple_add(a, b):
>>> return a + b
>>>
        >>> simple_add(a, b)
>>>
>>> # Use torch funcs for floor(a) if flag=True else ceil(a),
>>> # and return the torch tensor
>>> @array_converter(apply_to=('a',), recover=False)
>>> def floor_or_ceil(a, flag=True):
>>> return torch.floor(a) if flag else torch.ceil(a)
>>>
>>> floor_or_ceil(a, flag=False)
"""
def array_converter_wrapper(func):
"""Outer wrapper for the function."""
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Inner wrapper for the arguments."""
if len(apply_to) == 0:
return func(*args, **kwargs)
func_name = func.__name__
arg_spec = getfullargspec(func)
arg_names = arg_spec.args
arg_num = len(arg_names)
default_arg_values = arg_spec.defaults
if default_arg_values is None:
default_arg_values = []
no_default_arg_num = len(arg_names) - len(default_arg_values)
kwonly_arg_names = arg_spec.kwonlyargs
kwonly_default_arg_values = arg_spec.kwonlydefaults
if kwonly_default_arg_values is None:
kwonly_default_arg_values = {}
all_arg_names = arg_names + kwonly_arg_names
# in case there are args in the form of *args
if len(args) > arg_num:
named_args = args[:arg_num]
nameless_args = args[arg_num:]
else:
named_args = args
nameless_args = []
# template argument data type is used for all array-like arguments
if template_arg_name_ is None:
template_arg_name = apply_to[0]
else:
template_arg_name = template_arg_name_
if template_arg_name not in all_arg_names:
raise ValueError(f'{template_arg_name} is not among the '
f'argument list of function {func_name}')
# inspect apply_to
for arg_to_apply in apply_to:
if arg_to_apply not in all_arg_names:
raise ValueError(f'{arg_to_apply} is not '
f'an argument of {func_name}')
new_args = []
new_kwargs = {}
converter = ArrayConverter()
target_type = torch.Tensor if to_torch else np.ndarray
# non-keyword arguments
for i, arg_value in enumerate(named_args):
if arg_names[i] in apply_to:
new_args.append(
converter.convert(
input_array=arg_value, target_type=target_type))
else:
new_args.append(arg_value)
if arg_names[i] == template_arg_name:
template_arg_value = arg_value
kwonly_default_arg_values.update(kwargs)
kwargs = kwonly_default_arg_values
# keyword arguments and non-keyword arguments using default value
for i in range(len(named_args), len(all_arg_names)):
arg_name = all_arg_names[i]
if arg_name in kwargs:
if arg_name in apply_to:
new_kwargs[arg_name] = converter.convert(
input_array=kwargs[arg_name],
target_type=target_type)
else:
new_kwargs[arg_name] = kwargs[arg_name]
else:
default_value = default_arg_values[i - no_default_arg_num]
if arg_name in apply_to:
new_kwargs[arg_name] = converter.convert(
input_array=default_value, target_type=target_type)
else:
new_kwargs[arg_name] = default_value
if arg_name == template_arg_name:
template_arg_value = kwargs[arg_name]
# add nameless args provided by *args (if exists)
new_args += nameless_args
return_values = func(*new_args, **new_kwargs)
converter.set_template(template_arg_value)
def recursive_recover(input_data):
if isinstance(input_data, (tuple, list)):
new_data = []
for item in input_data:
new_data.append(recursive_recover(item))
return tuple(new_data) if isinstance(input_data,
tuple) else new_data
elif isinstance(input_data, dict):
new_data = {}
for k, v in input_data.items():
new_data[k] = recursive_recover(v)
return new_data
elif isinstance(input_data, (torch.Tensor, np.ndarray)):
return converter.recover(input_data)
else:
return input_data
if recover:
return recursive_recover(return_values)
else:
return return_values
return new_func
    return array_converter_wrapper
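A quick round-trip check of the decorator above, assuming array_converter and its ArrayConverter helper are importable: a NumPy input is converted to a torch.Tensor for the computation and recovered to NumPy on return. The function below is a made-up example.
import numpy as np
@array_converter(apply_to=('pts', ))
def shift_points(pts, offset=1.0):
    # inside the wrapper, `pts` is a torch.Tensor since to_torch defaults to True
    return pts + offset
out = shift_points(np.array([[0.0, 1.0, 2.0]]))
print(type(out))  # <class 'numpy.ndarray'>, matching the type of the input `pts`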
Python | def convert(self, input_array, target_type=None, target_array=None):
"""Convert input array to target data type.
Args:
input_array (tuple | list | np.ndarray |
torch.Tensor | int | float ):
                Input array.
target_type (<class 'np.ndarray'> | <class 'torch.Tensor'>,
optional):
Type to which input array is converted. Defaults to None.
target_array (np.ndarray | torch.Tensor, optional):
Template array to which input array is converted.
Defaults to None.
Raises:
            ValueError: If the input is a list or tuple and cannot be
                converted to a NumPy array, a ValueError is raised.
TypeError: If input type does not belong to the above range,
or the contents of a list or tuple do not share the
same data type, a TypeError is raised.
"""
if isinstance(input_array, (list, tuple)):
try:
input_array = np.array(input_array)
if input_array.dtype not in self.SUPPORTED_NON_ARRAY_TYPES:
raise TypeError
except (ValueError, TypeError):
print(f'The input cannot be converted to'
f' a single-type numpy array:\n{input_array}')
raise
elif isinstance(input_array, self.SUPPORTED_NON_ARRAY_TYPES):
input_array = np.array(input_array)
array_type = type(input_array)
assert target_type is not None or target_array is not None, \
'must specify a target'
if target_type is not None:
assert target_type in (np.ndarray, torch.Tensor), \
'invalid target type'
if target_type == array_type:
return input_array
elif target_type == np.ndarray:
# default dtype is float32
converted_array = input_array.cpu().numpy().astype(np.float32)
else:
# default dtype is float32, device is 'cpu'
converted_array = torch.tensor(
input_array, dtype=torch.float32)
else:
assert isinstance(target_array, (np.ndarray, torch.Tensor)), \
'invalid target array type'
if isinstance(target_array, array_type):
return input_array
elif isinstance(target_array, np.ndarray):
converted_array = input_array.cpu().numpy().astype(
target_array.dtype)
else:
converted_array = target_array.new_tensor(input_array)
        return converted_array
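A small usage sketch of the converter above, assuming an ArrayConverter instance whose SUPPORTED_NON_ARRAY_TYPES includes the standard float dtypes; the comment follows the conversion rules shown in the method.
import numpy as np
import torch
converter = ArrayConverter()
as_tensor = converter.convert([1.0, 2.0, 3.0], target_type=torch.Tensor)
as_float64 = converter.convert(as_tensor, target_array=np.zeros(3, dtype=np.float64))
# as_tensor.dtype == torch.float32, as_float64.dtype == np.float64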
Python | def voxelize_hv(self, points):
"""Apply hard voxelization to points."""
voxels, coors, num_points = [], [], []
for res in points:
res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)
voxels.append(res_voxels)
coors.append(res_coors)
num_points.append(res_num_points)
voxels = torch.cat(voxels, dim=0)
num_points = torch.cat(num_points, dim=0)
coors_batch = []
for i, coor in enumerate(coors):
coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)
coors_batch.append(coor_pad)
coors_batch = torch.cat(coors_batch, dim=0)
        return voxels, num_points, coors_batch
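The coordinate batching used above can be reproduced standalone: each sample's voxel coordinates get their batch index prepended as an extra leading column before concatenation.
import torch
import torch.nn.functional as F
coors = [torch.tensor([[1, 2, 3], [4, 5, 6]]), torch.tensor([[7, 8, 9]])]
coors_batch = torch.cat(
    [F.pad(coor, (1, 0), mode='constant', value=i) for i, coor in enumerate(coors)],
    dim=0)
# tensor([[0, 1, 2, 3], [0, 4, 5, 6], [1, 7, 8, 9]])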
Python | def voxelize(self, points):
"""Apply dynamic voxelization to points.
Args:
points (list[torch.Tensor]): Points of each sample.
Returns:
tuple[torch.Tensor]: Concatenated points and coordinates.
"""
coors = []
# dynamic voxelization only provide a coors mapping
for res in points:
res_coors = self.pts_voxel_layer(res)
coors.append(res_coors)
points = torch.cat(points, dim=0)
coors_batch = []
for i, coor in enumerate(coors):
coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)
coors_batch.append(coor_pad)
coors_batch = torch.cat(coors_batch, dim=0)
        return points, coors_batch
Python | def extract_feat(self, points, img, img_metas, train):
"""Extract features from images and points."""
img_feats = self.extract_img_feat(img, img_metas)
pts_feats, pts_aux_feats, img_feats = self.extract_pts_feat(
points, img_feats, img_metas, train, self.pts_li_fusion_layer)
if train:
return img_feats, pts_feats, pts_aux_feats
else:
            return img_feats, pts_feats
Python | def mono3d_load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
# The order of returned `cat_ids` will not
# change with the order of the CLASSES
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos
Python | def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
            img_info (dict): Image info of the image.
            ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
gt_bboxes_cam3d = []
centers2d = []
depths = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
pass
# continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
# 3D annotations in camera coordinates
bbox_cam3d = np.array(ann['bbox_cam3d']).reshape(-1, )
gt_bboxes_cam3d.append(bbox_cam3d)
# 2.5D annotations in camera coordinates
center2d = ann['center2d'][:2]
depth = ann['center2d'][2]
centers2d.append(center2d)
depths.append(depth)
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_cam3d:
gt_bboxes_cam3d = np.array(gt_bboxes_cam3d, dtype=np.float32)
centers2d = np.array(centers2d, dtype=np.float32)
depths = np.array(depths, dtype=np.float32)
else:
gt_bboxes_cam3d = np.zeros((0, self.bbox_code_size),
dtype=np.float32)
centers2d = np.zeros((0, 2), dtype=np.float32)
depths = np.zeros((0), dtype=np.float32)
gt_bboxes_cam3d = CameraInstance3DBoxes(
gt_bboxes_cam3d,
box_dim=gt_bboxes_cam3d.shape[-1],
origin=(0.5, 0.5, 0.5))
gt_labels_3d = copy.deepcopy(gt_labels)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
gt_bboxes_3d=gt_bboxes_cam3d,
gt_labels_3d=gt_labels_3d,
centers2d=centers2d,
depths=depths,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
        return ann
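For context, a minimal COCO-style record carrying the monocular-3D fields parsed above; the field names follow the parsing code, while every value is purely illustrative.
example_ann = {
    'id': 0, 'image_id': 0, 'category_id': 0, 'area': 1200.0,
    'bbox': [100.0, 120.0, 40.0, 30.0],                   # x1, y1, w, h
    'bbox_cam3d': [1.0, 1.5, 20.0, 1.6, 1.5, 3.9, 0.1],   # 3D box in camera coords
    'center2d': [120.0, 135.0, 20.0],                     # projected center (u, v) plus depth
}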
Python | def format_results(self,
outputs,
pklfile_prefix=None,
submission_prefix=None):
"""Format the results to pkl file.
Args:
outputs (list[dict]): Testing results of the dataset.
pklfile_prefix (str | None): The prefix of pkl files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
submission_prefix (str | None): The prefix of submitted files. It
includes the file path and the prefix of filename, e.g.,
"a/b/prefix". If not specified, a temp file will be created.
Default: None.
Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the result file paths, tmp_dir is the temporary directory \
                created for saving files when pklfile_prefix is not specified.
"""
if pklfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
pklfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
if not isinstance(outputs[0], dict):
result_files = self.bbox2result_kitti2d(outputs, self.CLASSES,
pklfile_prefix,
submission_prefix)
elif 'pts_bbox' in outputs[0] or 'img_bbox' in outputs[0] or \
'img_bbox2d' in outputs[0]:
result_files = dict()
for name in outputs[0]:
results_ = [out[name] for out in outputs]
pklfile_prefix_ = pklfile_prefix + name
if submission_prefix is not None:
submission_prefix_ = submission_prefix + name
else:
submission_prefix_ = None
if '2d' in name:
result_files_ = self.bbox2result_kitti2d(
results_, self.CLASSES, pklfile_prefix_,
submission_prefix_)
elif name == 'pts_bbox':
result_files_ = self.bbox2result_kitti(
results_, self.CLASSES, pklfile_prefix_,
submission_prefix_)
elif name == 'img_bbox':
result_files_ = self.bbox2result_kitti_3dcam(
results_, self.CLASSES, pklfile_prefix_,
submission_prefix_)
result_files[name] = result_files_
else:
result_files = self.bbox2result_kitti(outputs, self.CLASSES,
pklfile_prefix,
submission_prefix)
        return result_files, tmp_dir
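A hypothetical layout of `outputs` for the multi-branch case handled above: one dict per sample, keyed by the result branch the code dispatches on (the inner values are placeholders).
outputs = [
    {
        'pts_bbox': dict(boxes_3d=..., scores_3d=..., labels_3d=...),
        'img_bbox': dict(boxes_3d=..., scores_3d=..., labels_3d=...),
        'img_bbox2d': ...,  # 2D results, routed to bbox2result_kitti2d by the '2d' check
    },
]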
Python | def bbox2result_kitti_3dcam(self,
net_outputs,
class_names,
pklfile_prefix=None,
submission_prefix=None):
"""Convert 3D detection results to kitti format for evaluation and test
submission.
Args:
            net_outputs (list[np.ndarray]): List of arrays storing the \
                predicted bounding boxes and scores.
class_names (list[String]): A list of class names.
pklfile_prefix (str | None): The prefix of pkl file.
submission_prefix (str | None): The prefix of submission file.
Returns:
list[dict]: A list of dictionaries with the kitti format.
"""
assert len(net_outputs) == len(self.data_infos)
if submission_prefix is not None:
mmcv.mkdir_or_exist(submission_prefix)
det_annos = []
print('\nConverting prediction to KITTI format')
for idx, pred_dicts in enumerate(
mmcv.track_iter_progress(net_outputs)):
annos = []
info = self.data_infos[idx]
sample_idx = info['image']['image_idx']
image_shape = info['image']['image_shape'][:2]
box_dict = self.convert_valid_bboxes_3dcam(pred_dicts, info)
anno = {
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': [],
'score': []
}
if len(box_dict['bbox']) > 0:
box_2d_preds = box_dict['bbox']
box_preds = box_dict['box3d_camera']
scores = box_dict['scores']
box_preds_lidar = box_dict['box3d_lidar']
label_preds = box_dict['label_preds']
for box, box_lidar, bbox, score, label in zip(
box_preds, box_preds_lidar, box_2d_preds, scores,
label_preds):
bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
bbox[:2] = np.maximum(bbox[:2], [0, 0])
anno['name'].append(class_names[int(label)])
anno['truncated'].append(0.0)
anno['occluded'].append(0)
anno['alpha'].append(-np.arctan2(box[0], box[2]) + box[6])
anno['bbox'].append(bbox)
anno['dimensions'].append(box[3:6])
anno['location'].append(box[:3])
anno['rotation_y'].append(box[6])
anno['score'].append(score)
anno = {k: np.stack(v) for k, v in anno.items()}
annos.append(anno)
else:
anno = {
'name': np.array([]),
'truncated': np.array([]),
'occluded': np.array([]),
'alpha': np.array([]),
'bbox': np.zeros([0, 4]),
'dimensions': np.zeros([0, 3]),
'location': np.zeros([0, 3]),
'rotation_y': np.array([]),
'score': np.array([]),
}
annos.append(anno)
if submission_prefix is not None:
curr_file = f'{submission_prefix}/{sample_idx:06d}.txt'
with open(curr_file, 'w') as f:
bbox = anno['bbox']
loc = anno['location']
dims = anno['dimensions'] # lhw -> hwl
for idx in range(len(bbox)):
print(
'{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} '
'{:.4f} {:.4f} {:.4f} '
'{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
anno['name'][idx], anno['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2],
bbox[idx][3], dims[idx][1], dims[idx][2],
dims[idx][0], loc[idx][0], loc[idx][1],
loc[idx][2], anno['rotation_y'][idx],
anno['score'][idx]),
file=f)
annos[-1]['sample_idx'] = np.array(
[sample_idx] * len(annos[-1]['score']), dtype=np.int64)
det_annos += annos
if pklfile_prefix is not None:
if not pklfile_prefix.endswith(('.pkl', '.pickle')):
out = f'{pklfile_prefix}.pkl'
mmcv.dump(det_annos, out)
print('Result is saved to %s' % out)
        return det_annos
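Each line written to the submission file above follows the KITTI label format: name, truncated and occluded (hard-coded to -1 here), alpha, 2D box (x1, y1, x2, y2), dimensions (h, w, l), location (x, y, z), rotation_y and score. One illustrative line:
# Pedestrian -1 -1 0.1000 100.0000 120.0000 140.0000 150.0000 1.5000 0.6000 0.8000 1.0000 1.5000 20.0000 0.1200 0.9100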
Python | def convert_valid_bboxes_3dcam(self, box_dict, info):
"""Convert the predicted boxes into valid ones.
Args:
box_dict (dict): Box dictionaries to be converted.
- boxes_3d (:obj:`CameraInstance3DBoxes`): 3D bounding boxes.
- scores_3d (torch.Tensor): Scores of boxes.
- labels_3d (torch.Tensor): Class labels of boxes.
info (dict): Data info.
Returns:
dict: Valid predicted boxes.
- bbox (np.ndarray): 2D bounding boxes.
- box3d_camera (np.ndarray): 3D bounding boxes in \
camera coordinate.
- scores (np.ndarray): Scores of boxes.
- label_preds (np.ndarray): Class label predictions.
- sample_idx (int): Sample index.
"""
box_preds = box_dict['boxes_3d']
scores = box_dict['scores_3d']
labels = box_dict['labels_3d']
sample_idx = info['image']['image_idx']
if len(box_preds) == 0:
return dict(
bbox=np.zeros([0, 4]),
box3d_camera=np.zeros([0, 7]),
scores=np.zeros([0]),
label_preds=np.zeros([0, 4]),
sample_idx=sample_idx)
rect = info['calib']['R0_rect'].astype(np.float32)
Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
P2 = info['calib']['P2'].astype(np.float32)
img_shape = info['image']['image_shape']
P2 = box_preds.tensor.new_tensor(P2)
box_preds_camera = box_preds
box_preds_lidar = box_preds.convert_to(Box3DMode.LIDAR,
np.linalg.inv(rect @ Trv2c))
box_corners = box_preds_camera.corners
box_corners_in_image = points_cam2img(box_corners, P2)
# box_corners_in_image: [N, 8, 2]
minxy = torch.min(box_corners_in_image, dim=1)[0]
maxxy = torch.max(box_corners_in_image, dim=1)[0]
box_2d_preds = torch.cat([minxy, maxxy], dim=1)
# Post-processing
# check box_preds_camera
image_shape = box_preds.tensor.new_tensor(img_shape)
valid_cam_inds = ((box_2d_preds[:, 0] < image_shape[1]) &
(box_2d_preds[:, 1] < image_shape[0]) &
(box_2d_preds[:, 2] > 0) & (box_2d_preds[:, 3] > 0))
# check box_preds
valid_inds = valid_cam_inds
if valid_inds.sum() > 0:
return dict(
bbox=box_2d_preds[valid_inds, :].numpy(),
box3d_camera=box_preds_camera[valid_inds].tensor.numpy(),
box3d_lidar=box_preds_lidar[valid_inds].tensor.numpy(),
scores=scores[valid_inds].numpy(),
label_preds=labels[valid_inds].numpy(),
sample_idx=sample_idx)
else:
return dict(
bbox=np.zeros([0, 4]),
box3d_camera=np.zeros([0, 7]),
box3d_lidar=np.zeros([0, 7]),
scores=np.zeros([0]),
label_preds=np.zeros([0, 4]),
sample_idx=sample_idx)
Python | def _sample_wimg(self, num, sample_idx):
"""Sample with img which has same calibration matrix with
current sample's calibration matrix
"""
ret = []
while len(ret) < num:
if self._idx == self._example_num:
self._reset()
sampled_dict = self._sampled_list[self._indices[self._idx]]
if 'img_path' not in sampled_dict:
self._idx += 1
continue
# if self._scene_list[
# sampled_dict['image_idx']] != self._scene_list[sample_idx]:
# self._idx += 1
# continue
ret.append(self._indices[self._idx])
self._idx += 1
if self._idx == self._example_num:
self._reset()
return ret
Python | def sample(self, num, with_img=False, sample_idx=None):
"""Sample specific number of ground truths.
Args:
num (int): Sampled number.
Returns:
list[dict]: Sampled ground truths.
"""
if with_img:
indices = self._sample_wimg(num, sample_idx)
else:
indices = self._sample(num)
return [self._sampled_list[i] for i in indices]
Python | def sample_class_v2(self,
name,
num,
gt_bboxes,
gt_bboxes_2d=None,
sample_idx=None):
"""Sampling specific categories of bounding boxes.
Args:
name (str): Class of objects to be sampled.
num (int): Number of sampled bboxes.
gt_bboxes (np.ndarray): Ground truth boxes.
Returns:
list[dict]: Valid samples after collision test.
"""
sampled = self.sampler_dict[name].sample(num, self.with_img,
sample_idx)
sampled = copy.deepcopy(sampled)
num_gt = gt_bboxes.shape[0]
num_sampled = len(sampled)
gt_bboxes_bv = box_np_ops.center_to_corner_box2d(
gt_bboxes[:, 0:2], gt_bboxes[:, 3:5], gt_bboxes[:, 6])
sp_boxes = np.stack([i['box3d_lidar'] for i in sampled], axis=0)
boxes = np.concatenate([gt_bboxes, sp_boxes], axis=0).copy()
sp_boxes_new = boxes[gt_bboxes.shape[0]:]
sp_boxes_bv = box_np_ops.center_to_corner_box2d(
sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, 6])
total_bv = np.concatenate([gt_bboxes_bv, sp_boxes_bv], axis=0)
coll_mat = data_augment_utils.box_collision_test(total_bv, total_bv)
diag = np.arange(total_bv.shape[0])
coll_mat[diag, diag] = False
valid_samples = []
valid_check = [False for _ in range(num_gt + num_sampled)]
for i in range(num_gt, num_gt + num_sampled):
if coll_mat[i].any():
coll_mat[i] = False
coll_mat[:, i] = False
valid_check[i] = False
else:
valid_check[i] = True
if self.with_img:
cv_boxes = np.stack([i['bbox'] for i in sampled], axis=0)
total_cv = np.concatenate([gt_bboxes_2d, cv_boxes], axis=0)
overlaps_iou = box_np_ops.overlap_jit(total_cv)
# overlaps_iou = box_np_ops.iou_jit(total_cv, total_cv)
overlaps_iou[diag, diag] = 0.
for i in range(num_gt, num_gt + num_sampled):
if (overlaps_iou[i].max() > self.overlap_2d_thres) or (
overlaps_iou[:, i].max() > self.overlap_2d_thres):
overlaps_iou[i] = 0.
overlaps_iou[:, i] = 0.
valid_check[i] = False
for i in range(num_gt, num_gt + num_sampled):
if valid_check[i]:
valid_samples.append(sampled[i - num_gt])
return valid_samples
Python | def cog_unload(self):
"""
Remove the rocode job and safely shut down the scheduler
:return: Nothing
"""
self.rocode_job.remove()
self.scheduler.shutdown()
Python | def generate_data(self, count, offset, threads):
"""
Generates training data in the CRF++ format for the ingredient
tagging task
"""
df = pd.read_csv(self.opts.data_path)
df = df.fillna("")
start = offset
end = offset + count
df_slice = df.iloc[start: end]
qr = Process(target=self.start_queue_reader)
qr.start()
worker_pool = Pool(processes=threads or None)
worker_pool.map_async(self._generate_data_worker, df_slice.iterrows())
worker_pool.close()
worker_pool.join()
self.output_queue.put('DONE')
qr.join()
Python | def _parse_args(self, argv):
"""
Parse the command-line arguments into an argparse Namespace.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--count", default=100, type=int, help=' ')
parser.add_argument("--offset", default=0, type=int, help=' ')
parser.add_argument("--threads", default=0, type=int, help=' ')
parser.add_argument("--data-path", default="nyt-ingredients-snapshot-2015.csv", help=' ')
return parser.parse_args(argv)
Python | def discover():
"""
Auto-discover any Gutter configuration present in the django
INSTALLED_APPS.
"""
from django.conf import settings
from django.utils.importlib import import_module
for app in settings.INSTALLED_APPS:
module = '%s.gutter' % app
try:
import_module(module)
logger.info('Successfully autodiscovered %s' % module)
except:
pass
Python | def run_subs(data):
''' Finds the initial positions of a given motif in a DNA sequence '''
dna, motif = data.split('\n')
dna = DNA(dna)
return ' '.join(str(i + 1) for i in dna.find_motif(motif))
Python | def run_mrna(prot, mod=1000000):
''' Calculates the number of mRNA that encodes a protein mod 1E6 '''
if '*' not in prot:
# Add Stop if missing
prot += '*'
p = 1
for ama in prot:
p *= amino_table[ama]
p %= mod # Prevent overflow
return p
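A quick worked example for run_mrna, assuming amino_table holds the standard codon degeneracies (e.g. M: 1, A: 4, '*': 3): the protein 'MA' admits 1 * 4 * 3 = 12 mRNA sequences, so run_mrna('MA') should return 12. The standalone check below recomputes that figure.
# Standalone sanity check; 'counts' is a hand-written slice of the standard table.
counts = {'M': 1, 'A': 4, '*': 3}
p = 1
for ama in 'MA*':
    p = (p * counts[ama]) % 1000000
print(p)  # 12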
Python | def find_motif(self, motif):
''' Find initial positions of given motif '''
positions = []
for i in range(len(self.sequence) - len(motif) + 1):
if self.sequence[i:i + len(motif)] == motif:
positions.append(i)
return positions
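A small check of the motif search (using the inclusive range fix above), showing that an occurrence flush against the end of the sequence is reported: 'ATA' occurs at 0-based positions 0 and 2 in 'ATATA'.
# Self-contained version of the same scan over a toy sequence.
seq, motif = 'ATATA', 'ATA'
positions = [i for i in range(len(seq) - len(motif) + 1)
             if seq[i:i + len(motif)] == motif]
print(positions)  # [0, 2]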
Python | def reverse_complement(self):
''' Returns a string containing the reverse complement '''
return DNA(''.join(
DNA.complements[base]
for base in reversed(self.sequence)))
Python | def to_proteins(self):
''' Returns possible proteins encoded by DNA sequence '''
for prot in self.to_rna().to_proteins():
yield prot
for prot in self.reverse_complement().to_rna().to_proteins():
yield prot
Python | def translate(self, stop=False, start=0):
''' Translates RNA into a sequence of aminoacids '''
protein = []
length = len(self.sequence[start:]) // 3 * 3
for i in range(0, length, 3):
aminoacid = codon_table[self.sequence[start + i:start + i + 3]]
if stop and aminoacid == '*':
break
protein.append(aminoacid)
return ''.join(protein)
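An illustrative translation, assuming codon_table follows the standard genetic code (so 'AUG' -> 'M', 'GCC' -> 'A', 'UGA' -> '*'): RNA('AUGGCCUGA').translate() would give 'MA*' and translate(stop=True) would give 'MA'. The snippet below reproduces that with a hand-written three-entry table.
# Hand-written slice of the standard codon table, for illustration only.
codons = {'AUG': 'M', 'GCC': 'A', 'UGA': '*'}
seq = 'AUGGCCUGA'
protein = ''.join(codons[seq[i:i + 3]] for i in range(0, len(seq) // 3 * 3, 3))
print(protein)                 # MA*
print(protein.split('*')[0])   # MA  (what stop=True would keep)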
Python | def to_proteins(self):
''' Returns possible proteins encoded by RNA sequence '''
for start in range(3):
amino = self.translate(start=start)
for match in re.finditer(r'M', amino):
start = match.start()
size = amino[start:].find('*')
if size > 0:
seq = amino[start:start + size]
yield Protein(seq)
Python | def fetch(url):
''' Fetch protein sequence fasta from UniProt '''
req = request.Request(url, None, headers={
'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; \
Win64; x64; Trident/5.0'})
fasta = request.urlopen(req).read().decode("utf-8").strip()
for prot in read_fasta(fasta, Protein):
# Odd way to deal with a generator
return prot
Python | def run_mprt(uids):
''' Returns IDs and positions of the N-glycosylation motif for the given protein IDs '''
res = ''
for uid in uids.split():
prot = fetch(source.format(uid))
pos = prot.find_motif('N{P}[ST]{P}')
if pos:
res = '{}\n{}\n{}'.format(res, uid, ' '.join(str(p) for p in pos))
return res.strip()
Python | def run_gc(fasta):
''' Returns sequence name with max GC content '''
max_content, max_dna = 0, None
for dna in read_fasta(fasta):
gc = dna.gc_content()
if gc > max_content:
max_content = gc
max_dna = dna
return '%s\n%f' % (max_dna.name, max_content)
Python | def run_iev(couples):
''' Calculates the expected number of offspring with dominant phenotype '''
couples = [int(c) for c in couples.split()]
offspring = 0
for phenotype, amount in enumerate(couples):
if phenotype == 3:
# Aa-Aa
prob = 0.75
elif phenotype == 4:
# Aa-aa
prob = 0.5
elif phenotype == 5:
# aa-aa
continue
else:
# AA-AA, AA-Aa, AA-aa
prob = 1
offspring += amount * prob * 2
return offspring
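A worked example for run_iev: with couples '1 0 0 1 0 1' (one AA-AA, one Aa-Aa and one aa-aa pair, two offspring each), the expectation is 1*1*2 + 1*0.75*2 + 1*0*2 = 3.5.
# Same computation written out directly.
probs = [1, 1, 1, 0.75, 0.5, 0]      # P(dominant offspring) per couple type
couples = [1, 0, 0, 1, 0, 1]
print(sum(c * p * 2 for c, p in zip(couples, probs)))  # 3.5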
Python | def run_hamm(data):
''' Calculates the Hamming distance between two strings '''
seq, other = data.split('\n')
seq, other = DNA(seq), DNA(other)
return seq.hamming_distance(other)
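For example, 'ACGT' and 'ACCT' differ at a single position, so run_hamm('ACGT\nACCT') should return 1, assuming DNA.hamming_distance counts mismatched positions in the usual way.
# Direct position-by-position count for the same pair.
a, b = 'ACGT', 'ACCT'
print(sum(x != y for x, y in zip(a, b)))  # 1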
Python | def run_iprb(data):
''' Finds the probability of an offspring with dominant allele '''
k, m, n = [int(i) for i in data.split()]
r, pop = 0, sum([k, m, n])
# DD + DD
r += (k / pop * (k - 1) / (pop - 1))
# DD + Dr
r += (k / pop * (m) / (pop - 1))
# DD + rr
r += (k / pop * (n) / (pop - 1))
# Dr + DD
r += (m / pop * (k) / (pop - 1))
# Dr + Dr
r += (m / pop * (m - 1) / (pop - 1)) * 0.75
# Dr + rr
r += (m / pop * (n) / (pop - 1)) * 0.5
# rr + DD
r += (n / pop * (k) / (pop - 1))
# rr + Dr
r += (n / pop * (m) / (pop - 1)) * 0.5
# rr + rr
r += (n / pop * (n - 1) / (pop - 1)) * 0
return r
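A sanity check for run_iprb with k = m = n = 2: the chance of an aa offspring is 13/60, so the dominant-phenotype probability is 1 - 13/60 ≈ 0.78333, which is what run_iprb('2 2 2') should return.
# P(aa) = P(Aa x Aa) * 1/4 + P(Aa x aa) * 1/2 + P(aa x aa) * 1
p_aa = (2/6)*(1/5)*0.25 + 2*(2/6)*(2/5)*0.5 + (2/6)*(1/5)*1.0
print(1 - p_aa)  # 0.7833333333333333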
Python | def post_comments(post):
"""
Simple tag to retrieve all comments.
"""
comments = Comment.objects.comments(post=post)
return comments
Python | def post_comments_count(post):
"""
Simple tag to display the total count of comments for one post.
"""
comments_count = Comment.objects.comments_count(post=post)
return comments_count
Python | def has_media(self):
"""
Check if post has media.
"""
if self.image:
return True
return False
Python | def delete(self, user):
"""
Method to delete content already posted.
Handle image removal from the filesystem prior to
deleting its path from the database.
"""
if user.is_authenticated() and self.author == user:
if self._handle_removed_media():
Timeline.objects.remove_from_timeline(instance=self, user=user)
super(Post, self).delete()
cache_bust([('posts_timeline', user.pk), ('comments', self.pk)])
return True
return False
Python | def change_approval(self, status):
"""
Approve or disapprove a comment.
"""
if status == 'approve':
return self.approve()
elif status == 'disapprove':
return self.disapprove()
Python | def make_key(key_type, pk):
"""
Build the cache key for a particular type of cached value.
"""
key = CACHE_KEYS['user_keys'].get(key_type, None)
if key is None:
key = CACHE_KEYS['post_keys'].get(key_type)
key = key.format(pk=pk)
return key
Python | def make_key_many(cache_types):
"""
Build the cache key for several cache values.
"""
keys = {}
for key_type, pk in cache_types:
key = make_key(key_type, pk)
keys.update({key_type: key})
return keys
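The CACHE_KEYS mapping itself is not part of this snippet; the sketch below assumes a plausible layout (key names borrowed from the cache_bust calls elsewhere in these snippets, e.g. 'posts_timeline', 'comments' and 'disapproved_comments') purely to illustrate how make_key and make_key_many resolve and format keys.
# Hypothetical layout, not the project's actual configuration.
CACHE_KEYS = {
    'user_keys': {'posts_timeline': 'posts_timeline_{pk}'},
    'post_keys': {'comments': 'comments_{pk}',
                  'disapproved_comments': 'disapproved_comments_{pk}'},
}
# With the formatting fix above, make_key('posts_timeline', 42) -> 'posts_timeline_42'
# and make_key_many([('comments', 7)]) -> {'comments': 'comments_7'}.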
Python | def comments_count(self, post):
"""
Return a count of all post comments.
"""
count = self.comments(post).count()
return count
Python | def approved_comments_count(self, post):
"""
Return a count of all post approved comments.
"""
count = self.approved_comments(post).count()
return count
Python | def disapproved_comments_count(self, post):
"""
Return a count of all post disapproved comments.
"""
count = self.disapproved_comments(post).count()
return count
Python | def delete_disapproved(self, post, user):
"""
Remove disapproved comments for a post.
"""
if user.is_authenticated() and user == post.author:
disapproved = self.disapproved_comments(post)
if disapproved:
disapproved.delete()
cache_bust([('disapproved_comments', post.pk)])
return True
return False