Dataset columns:
  repository_name             string, length 5-67
  func_path_in_repository     string, length 4-234
  func_name                   string, length 0-314
  whole_func_string           string, length 52-3.87M
  language                    6 classes
  func_code_string            string, length 52-3.87M
  func_documentation_string   string, length 1-47.2k
  func_code_url               string, length 85-339
swift-nav/libsbp
generator/sbpg/targets/latex.py
render_source
python
def render_source(output_dir, package_specs, version):
    """
    Render and output
    """
    destination_filename = "%s/sbp_out.tex" % output_dir
    py_template = JENV.get_template(TEMPLATE_NAME)
    stable_msgs = []
    unstable_msgs = []
    prims = []
    for p in sorted(package_specs, key=attrgetter('identifier')):
        pkg_name = p.identifier
        stable = p.stable
        # build list of required definitions (this package plus includes)
        # TODO: recursively include files
        definitions = p.definitions
        for inc in p.includes:
            inc_basename = inc.split(".")[0]
            for pkg in package_specs:
                if pkg.identifier.endswith(inc_basename):
                    definitions += pkg.definitions
        if pkg_name == "swiftnav.sbp.types":
            prims = p.definitions
        for d in p.definitions:
            if d.public and d.static and d.sbp_id:
                items, size, multiplier = handle_fields(definitions, d.fields, "", 0, None)
                adj_size = ""
                if multiplier == 1:
                    adj_size = "N+%d" % (size - 1) if size > 1 else "N"
                elif multiplier:
                    if multiplier == size:
                        adj_size = "%dN" % multiplier
                    else:
                        adj_size = "%dN+%d" % (multiplier, size - multiplier)
                else:
                    adj_size = "%d" % size
                ti = TableItem(pkg_name, d.identifier, d.sbp_id, d.short_desc,
                               d.desc, adj_size, items, p.stable, p.description)
                pkg_name = ""
                if stable:
                    stable_msgs.append(ti)
                else:
                    unstable_msgs.append(ti)
    with open(destination_filename, 'w') as f:
        f.write(py_template.render(msgs=stable_msgs,
                                   umsgs=unstable_msgs,
                                   prims=prims,
                                   version=version))
    import subprocess
    import os
    os.chdir(output_dir)
    subprocess.call(["pdflatex", "--enable-write18", "-shell-escape", "sbp_out.tex"])
    subprocess.call(["mv", "sbp_out.pdf", "../docs/sbp.pdf"])
Render and output
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L255-L311
swift-nav/libsbp
python/sbp/client/examples/udp.py
get_args
python
def get_args():
    """
    Get and parse arguments.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Swift Navigation SBP Example.")
    parser.add_argument(
        "-s", "--serial-port",
        default=[DEFAULT_SERIAL_PORT],
        nargs=1,
        help="specify the serial port to use.")
    parser.add_argument(
        "-b", "--baud",
        default=[DEFAULT_SERIAL_BAUD],
        nargs=1,
        help="specify the baud rate to use.")
    parser.add_argument(
        "-a", "--address",
        default=[DEFAULT_UDP_ADDRESS],
        nargs=1,
        help="specify the UDP address to use.")
    parser.add_argument(
        "-p", "--udp-port",
        default=[DEFAULT_UDP_PORT],
        nargs=1,
        help="specify the UDP port to use.")
    return parser.parse_args()
Get and parse arguments.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/examples/udp.py#L31-L62
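A minimal sketch of consuming these parsed arguments, assuming the DEFAULT_* constants defined alongside get_args; because of nargs=1 each value arrives wrapped in a one-element list:

args = get_args()
serial_port = args.serial_port[0]
baud = args.baud[0]
address = args.address[0]
udp_port = args.udp_port[0]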
swift-nav/libsbp
generator/sbpg/targets/c.py
commentify
python
def commentify(value):
    """
    Builds a comment.
    """
    value = markdown_links(value)
    if value is None:
        return
    if len(value.split('\n')) == 1:
        return "* " + value
    else:
        return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
Builds a comment.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/c.py#L22-L32
swift-nav/libsbp
generator/sbpg/targets/c.py
convert
python
def convert(value):
    """Converts to a C language appropriate identifier format.
    """
    s0 = "Sbp" + value if value in COLLISIONS else value
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s0)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + "_t"
Converts to a C language appropriate identifier format.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/c.py#L47-L53
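Illustrative behaviour of the two regex passes, assuming the input name is not listed in COLLISIONS:

# assuming "MsgBasePosLLH" is not in COLLISIONS
convert("MsgBasePosLLH")  # -> "msg_base_pos_llh_t"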
swift-nav/libsbp
generator/sbpg/targets/c.py
mk_id
python
def mk_id(field):
    """Builds an identifier from a field.
    """
    name = field.type_id
    if name == "string":
        return "%s" % ("char")
    elif name == "array" and field.size:
        if field.options['fill'].value not in CONSTRUCT_CODE:
            return "%s" % convert(field.options['fill'].value)
        else:
            return "%s" % field.options['fill'].value
    elif name == "array":
        return "%s" % convert(field.options['fill'].value)
    elif name not in CONSTRUCT_CODE:
        return convert(name)
    else:
        return name
Builds an identifier from a field.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/c.py#L55-L71
swift-nav/libsbp
generator/sbpg/targets/c.py
mk_size
python
def mk_size(field):
    """Builds an identifier for a container type.
    """
    name = field.type_id
    if name == "string" and field.options.get('size', None):
        return "%s[%d];" % (field.identifier, field.options.get('size').value)
    elif name == "string":
        return "%s[0];" % field.identifier
    elif name == "array" and field.options.get('size', None):
        return "%s[%d];" % (field.identifier, field.options.get('size').value)
    elif name == "array":
        return "%s[0];" % field.identifier
    else:
        return '%s;' % field.identifier
Builds an identifier for a container type.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/c.py#L73-L86
swift-nav/libsbp
generator/sbpg/targets/c.py
render_source
python
def render_source(output_dir, package_spec):
    """
    Render and output to a directory given a package specification.
    """
    path, name = package_spec.filepath
    destination_filename = "%s/%s.h" % (output_dir, name)
    py_template = JENV.get_template(MESSAGES_TEMPLATE_NAME)
    with open(destination_filename, 'w') as f:
        f.write(py_template.render(msgs=package_spec.definitions,
                                   pkg_name=name,
                                   filepath="/".join(package_spec.filepath) + ".yaml",
                                   max_msgid_len=package_spec.max_msgid_len,
                                   description=package_spec.description,
                                   timestamp=package_spec.creation_timestamp,
                                   include=extensions(package_spec.includes)))
Render and output to a directory given a package specification.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/c.py#L93-L107
swift-nav/libsbp
python/sbp/msg.py
crc16jit
python
def crc16jit(buf, offset, crc, length):
    """CRC16 implementation according to CCITT standards."""
    for index in range(offset, offset + length):
        data = buf[index]
        lookup = crc16_tab[((nb.u2(crc) >> 8) & nb.u2(0xFF)) ^ (data & nb.u2(0xFF))]
        crc = ((nb.u2(crc) << nb.u2(8)) & nb.u2(0xFFFF)) ^ lookup
        crc = nb.u2(crc) & nb.u2(0xFFFF)
    return crc
CRC16 implementation according to CCITT standards.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L66-L73
swift-nav/libsbp
python/sbp/msg.py
crc16_nojit
python
def crc16_nojit(s, crc=0):
    """CRC16 implementation according to CCITT standards."""
    for ch in bytearray(s):  # bytearray's elements are integers in both python 2 and 3
        crc = ((crc << 8) & 0xFFFF) ^ _crc16_tab[((crc >> 8) & 0xFF) ^ (ch & 0xFF)]
        crc &= 0xFFFF
    return crc
CRC16 implementation according to CCITT standards.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L84-L89
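A sketch of chaining these CRC helpers over a frame, mirroring the usage in Framer._receive further down: the CRC covers the header minus the preamble byte, then the payload, with the running value passed back in. The header values here are placeholders.

import struct

hdr = struct.pack("<HHB", 0x0100, 0x42, 3)   # msg_type, sender, length (illustrative values)
payload = b"\x01\x02\x03"
crc = crc16_nojit(hdr)            # start from the default seed of 0
crc = crc16_nojit(payload, crc)   # continue over the payload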
swift-nav/libsbp
python/sbp/msg.py
SBP._get_framed
python
def _get_framed(self, buf, offset, insert_payload):
    """Returns the framed message and updates the CRC.
    """
    header_offset = offset + self._header_len
    self.length = insert_payload(buf, header_offset, self.payload)
    struct.pack_into(self._header_fmt, buf, offset,
                     self.preamble, self.msg_type, self.sender, self.length)
    crc_offset = header_offset + self.length
    preamble_bytes = 1
    crc_over_len = self._header_len + self.length - preamble_bytes
    self.crc = crc16jit(buf, offset + 1, 0, crc_over_len)
    struct.pack_into(self._crc_fmt, buf, crc_offset, self.crc)
    length = preamble_bytes + crc_over_len + self._crc_len
    return length
Returns the framed message and updates the CRC.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L172-L191
swift-nav/libsbp
python/sbp/msg.py
SBP.pack
python
def pack(self):
    """Pack to framed binary message.
    """
    buf = np.zeros(512, dtype=np.uint8)
    packed_len = self._get_framed(buf, 0, self._copy_payload)
    d = buf[:packed_len]
    return d.tobytes()
Pack to framed binary message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L193-L200
swift-nav/libsbp
python/sbp/msg.py
SBP.pack_into
python
def pack_into(self, buf, offset, write_payload):
    """Pack to framed binary message.
    """
    return self._get_framed(buf, offset, write_payload)
Pack to framed binary message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L202-L206
swift-nav/libsbp
python/sbp/msg.py
SBP.unpack
python
def unpack(d):
    """Unpack and return a framed binary message.
    """
    p = SBP._parser.parse(d)
    assert p.preamble == SBP_PREAMBLE, "Invalid preamble 0x%x." % p.preamble
    return SBP(p.msg_type, p.sender, p.length, p.payload, p.crc)
Unpack and return a framed binary message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L209-L215
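A round trip through the framing layer, sketched with placeholder values; the positional constructor arguments follow the SBP(...) call inside unpack, and the length and CRC are recomputed by pack():

msg = SBP(0x0100, 0x42, 3, b"\x01\x02\x03", None)
framed = msg.pack()          # preamble + header + payload + CRC as bytes
again = SBP.unpack(framed)   # asserts on the preamble, returns a new SBP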
swift-nav/libsbp
python/sbp/msg.py
SBP.to_json
python
def to_json(self, sort_keys=False):
    """Produce a JSON-encoded SBP message.
    """
    d = self.to_json_dict()
    return json.dumps(d, sort_keys=sort_keys)
Produce a JSON-encoded SBP message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L234-L239
swift-nav/libsbp
python/sbp/msg.py
SBP.from_json
python
def from_json(s):
    """Given a JSON-encoded message, build an object.
    """
    d = json.loads(s)
    sbp = SBP.from_json_dict(d)
    return sbp
Given a JSON-encoded message, build an object.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/msg.py#L242-L248
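A corresponding JSON round trip, assuming msg is any SBP instance such as the one above:

s = msg.to_json(sort_keys=True)   # JSON string via to_json_dict()
copy = SBP.from_json(s)           # rebuilt through from_json_dict()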
swift-nav/libsbp
python/sbp/client/drivers/pyserial_driver.py
PySerialDriver.read
python
def read(self, size):
    """
    Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.
    """
    try:
        return self.handle.read(size)
    except (OSError, serial.SerialException):
        print()
        print("Piksi disconnected")
        print()
        self.handle.close()
        raise IOError
Read wrapper. Parameters ---------- size : int Number of bytes to read.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/pyserial_driver.py#L69-L85
swift-nav/libsbp
python/sbp/client/drivers/pyserial_driver.py
PySerialDriver.write
python
def write(self, s):
    """
    Write wrapper.

    Parameters
    ----------
    s : bytes
        Bytes to write
    """
    try:
        return self.handle.write(s)
    except (OSError, serial.SerialException, serial.writeTimeoutError) as e:
        if e == serial.writeTimeoutError:
            print("sbp pyserial_driver: writeTimeoutError")
            return 0
        else:
            print()
            print("Piksi disconnected")
            print()
            self.handle.close()
            raise IOError
Write wrapper. Parameters ---------- s : bytes Bytes to write
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/pyserial_driver.py#L87-L108
swift-nav/libsbp
generator/sbpg/targets/java.py
commentify
python
def commentify(value):
    """
    Builds a comment.
    """
    value = comment_links(value)
    if value is None:
        return
    if len(value.split('\n')) == 1:
        return "* " + value
    else:
        return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
Builds a comment.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L60-L70
swift-nav/libsbp
generator/sbpg/targets/java.py
parse_type
python
def parse_type(field):
    """
    Function to pull a type from the binary payload.
    """
    if field.type_id == 'string':
        if 'size' in field.options:
            return "parser.getString(%d)" % field.options['size'].value
        else:
            return "parser.getString()"
    elif field.type_id in JAVA_TYPE_MAP:
        # Primitive java types have extractor methods in SBPMessage.Parser
        return "parser.get" + field.type_id.capitalize() + "()"
    if field.type_id == 'array':
        # Call function to build array
        t = field.options['fill'].value
        if t in JAVA_TYPE_MAP:
            if 'size' in field.options:
                return "parser.getArrayof%s(%d)" % (t.capitalize(),
                                                    field.options['size'].value)
            else:
                return "parser.getArrayof%s()" % t.capitalize()
        else:
            if 'size' in field.options:
                return "parser.getArray(%s.class, %d)" % (t, field.options['size'].value)
            else:
                return "parser.getArray(%s.class)" % t
    else:
        # This is an inner class, call default constructor
        return "new %s().parse(parser)" % field.type_id
Function to pull a type from the binary payload.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L81-L108
swift-nav/libsbp
generator/sbpg/targets/java.py
build_type
python
def build_type(field):
    """
    Function to pack a type into the binary payload.
    """
    if field.type_id == 'string':
        if 'size' in field.options:
            return "builder.putString(%s, %d)" % (field.identifier,
                                                  field.options['size'].value)
        else:
            return "builder.putString(%s)" % field.identifier
    elif field.type_id in JAVA_TYPE_MAP:
        # Primitive java types have extractor methods in SBPMessage.Builder
        return "builder.put%s(%s)" % (field.type_id.capitalize(), field.identifier)
    if field.type_id == 'array':
        # Call function to build array
        t = field.options['fill'].value
        if t in JAVA_TYPE_MAP:
            if 'size' in field.options:
                return "builder.putArrayof%s(%s, %d)" % (t.capitalize(),
                                                         field.identifier,
                                                         field.options['size'].value)
            else:
                return "builder.putArrayof%s(%s)" % (t.capitalize(), field.identifier)
        else:
            if 'size' in field.options:
                return "builder.putArray(%s, %d)" % (field.identifier,
                                                     field.options['size'].value)
            else:
                return "builder.putArray(%s)" % field.identifier
    else:
        return "%s.build(builder)" % field.identifier
Function to pack a type into the binary payload.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L110-L138
swift-nav/libsbp
generator/sbpg/targets/java.py
render_source
python
def render_source(output_dir, package_spec, jenv=JENV):
    """
    Render and output
    """
    path, module_name = package_spec.filepath
    java_template = jenv.get_template(TEMPLATE_NAME)
    module_path = "com." + package_spec.identifier
    yaml_filepath = "/".join(package_spec.filepath) + ".yaml"
    includes = [".".join(i.split(".")[:-1]) for i in package_spec.includes]
    includes = [i for i in includes if i != "types"]
    for msg in package_spec.definitions:
        msg_name = classnameify(msg.identifier) if msg.sbp_id else msg.identifier
        l = "/".join(package_spec.filepath)
        destination_filename = "%s/com/%s/%s.java" % (output_dir, l, msg_name)
        # Create the output directory if it doesn't exist
        if not os.path.exists(os.path.dirname(destination_filename)):
            os.mkdir(os.path.dirname(destination_filename))
        with open(destination_filename, 'w+') as f:
            print(destination_filename)
            f.write(java_template.render(m=msg,
                                         filepath=yaml_filepath,
                                         module_path=module_path,
                                         include=includes,
                                         description=package_spec.description))
Render and output
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L158-L183
swift-nav/libsbp
generator/sbpg/targets/java.py
render_table
python
def render_table(output_dir, packages, jenv=JENV):
    """
    Render and output dispatch table
    """
    destination_filename = output_dir + "/com/swiftnav/sbp/client/MessageTable.java"
    with open(destination_filename, 'w+') as f:
        print(destination_filename)
        f.write(jenv.get_template(TEMPLATE_TABLE_NAME).render(packages=packages))
Render and output dispatch table
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/java.py#L185-L192
swift-nav/libsbp
generator/sbpg/targets/javascript.py
construct_format
python
def construct_format(f, type_map=CONSTRUCT_CODE):
    """
    Formats for binary-parser library.
    """
    formatted = ""
    if type_map.get(f.type_id, None):
        return "%s('%s')" % (type_map.get(f.type_id), f.identifier)
    elif f.type_id == 'string' and f.options.get('size', None):
        return "string('%s', { length: %d })" % (f.identifier, f.options['size'].value)
    elif f.type_id == 'string':
        return "string('%s', { greedy: true })" % (f.identifier)
    elif f.type_id == 'array':
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        size = f.options.get('size', None)
        size_fn = f.options.get('size_fn', None)
        field_type = type_map.get(f_.type_id, None)
        if field_type is None:
            field_type = "%s.prototype.parser" % f_.type_id
        else:
            field_type = "'%s'" % field_type
        if size is not None:
            d = {"'uint16'": "'uint16le'",
                 "'uint32'": "'uint32le'",
                 "'uint64'": "'uint16le'",
                 "'int16'": "'int16le'",
                 "'int32'": "'int32le'",
                 "'int64'": "'int16le'"}
            field_type_arr = d.get(field_type, field_type)
            return "array('%s', { length: %d, type: %s })" % (f.identifier, size.value,
                                                              field_type_arr)
        elif f.options.get('size_fn') is not None:
            return "array('%s', { type: %s, length: '%s' })" % (f_.identifier, field_type,
                                                                size_fn.value)
        else:
            return "array('%s', { type: %s, readUntil: 'eof' })" % (f_.identifier, field_type)
    else:
        return "nest('%s', { type: %s.prototype.parser })" % (f.identifier, f.type_id)
    return formatted
Formats for binary-parser library.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/javascript.py#L120-L153
swift-nav/libsbp
generator/sbpg/targets/javascript.py
js_classnameify
python
def js_classnameify(s):
    """
    Makes a classname.
    """
    if not '_' in s:
        return s
    return ''.join(w[0].upper() + w[1:].lower() for w in s.split('_'))
Makes a classname.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/javascript.py#L156-L162
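Illustrative outputs: names without underscores pass through unchanged, the rest are CamelCased word by word.

js_classnameify("heartbeat")          # -> "heartbeat"
js_classnameify("msg_base_pos_llh")   # -> "MsgBasePosLlh"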
swift-nav/libsbp
python/sbp/observation.py
MsgEphemerisGPSDepF.from_binary
python
def from_binary(self, d):
    """Given a binary payload d, update the appropriate payload fields of
    the message.
    """
    p = MsgEphemerisGPSDepF._parser.parse(d)
    for n in self.__class__.__slots__:
        setattr(self, n, getattr(p, n))
Given a binary payload d, update the appropriate payload fields of the message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/observation.py#L1715-L1722
swift-nav/libsbp
python/sbp/observation.py
MsgEphemerisGPSDepF.to_binary
python
def to_binary(self):
    """Produce a framed/packed SBP message.
    """
    c = containerize(exclude_fields(self))
    self.payload = MsgEphemerisGPSDepF._parser.build(c)
    return self.pack()
Produce a framed/packed SBP message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/observation.py#L1724-L1730
swift-nav/libsbp
python/sbp/observation.py
MsgEphemerisGPSDepF.into_buffer
python
def into_buffer(self, buf, offset):
    """Produce a framed/packed SBP message into the provided buffer and offset.
    """
    self.payload = containerize(exclude_fields(self))
    self.parser = MsgEphemerisGPSDepF._parser
    self.stream_payload.reset(buf, offset)
    return self.pack_into(buf, offset, self._build_payload)
Produce a framed/packed SBP message into the provided buffer and offset.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/observation.py#L1732-L1739
swift-nav/libsbp
python/sbp/client/framer.py
Framer._readall
python
def _readall(self, size):
    """
    Read until all bytes are collected.

    Parameters
    ----------
    size : int
        Number of bytes to read.
    """
    data = b""
    while len(data) < size:
        d = self._read(size - len(data))
        if self._broken:
            raise StopIteration
        if not d:
            # NOTE (Buro/jgross): Force a yield here to another thread. In
            # case the stream fails midstream, the spinning here causes
            # the UI thread to lock up without yielding.
            time.sleep(0)
            continue
        data += d
    return data
Read until all bytes are collected. Parameters ---------- size : int Number of bytes to read.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/framer.py#L94-L115
swift-nav/libsbp
python/sbp/client/framer.py
Framer._receive
python
def _receive(self):
    """
    Read and build SBP message.
    """
    # preamble - not readall(1) to allow breaking before messages,
    # empty input
    preamble = self._read(1)
    if not preamble:
        return None
    elif ord(preamble) != SBP_PREAMBLE:
        if self._verbose:
            print("Host Side Unhandled byte: 0x%02x" % ord(preamble))
        return None
    # hdr
    hdr = self._readall(5)
    msg_crc = crc16(hdr)
    msg_type, sender, msg_len = struct.unpack("<HHB", hdr)
    # data
    data = self._readall(msg_len)
    msg_crc = crc16(data, msg_crc)
    # crc
    crc = self._readall(2)
    crc, = struct.unpack("<H", crc)
    if crc != msg_crc:
        if self._verbose:
            print("crc mismatch: 0x%04X 0x%04X" % (msg_crc, crc))
        return None
    msg = SBP(msg_type, sender, msg_len, data, crc)
    try:
        msg = self._dispatch(msg)
    except Exception as exc:
        warnings.warn("SBP dispatch error: %s" % (exc,))
    return msg
Read and build SBP message.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/framer.py#L117-L149
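The framing _receive expects can be reproduced by hand; this sketch, with made-up field values, lays out one frame as the parser reads it back: a 0x55 preamble, a little-endian <HHB header, the payload, then a little-endian CRC-16 over header plus payload (computed here with the crc16_nojit helper shown earlier).

import struct

msg_type, sender, payload = 0x0100, 0x42, b"\x01\x02\x03"   # illustrative values
hdr = struct.pack("<HHB", msg_type, sender, len(payload))
crc = crc16_nojit(payload, crc16_nojit(hdr))   # CRC skips the preamble byte
frame = b"\x55" + hdr + payload + struct.pack("<H", crc)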
swift-nav/libsbp
python/sbp/table.py
dispatch
python
def dispatch(msg, table=_SBP_TABLE):
    """
    Dispatch an SBP message type based on its `msg_type` and parse its
    payload.

    Parameters
    ----------
    driver : :class:`SBP`
        A parsed SBP object.
    table : dict
        Any table mapping unique SBP message type IDs to SBP message
        constructors.

    Returns
    ----------
    SBP message with a parsed payload.
    """
    try:
        return table[msg.msg_type](msg)
    except KeyError:
        warn = "No message found for msg_type id %d for msg %s." % (msg.msg_type, msg)
        warnings.warn(warn, RuntimeWarning)
        return msg
    except FormatFieldError:
        warnings.warn("SBP payload deserialization error! 0x%x" % msg.msg_type,
                      RuntimeWarning)
        return msg
Dispatch an SBP message type based on its `msg_type` and parse its payload. Parameters ---------- driver : :class:`SBP` A parsed SBP object. table : dict Any table mapping unique SBP message type IDs to SBP message constructors. Returns ---------- SBP message with a parsed payload.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/table.py#L70-L98
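Combining unpack and dispatch, decoding one framed byte string (such as the frame built above) into its concrete message class might look like this sketch:

base = SBP.unpack(frame)     # generic SBP with the raw payload
concrete = dispatch(base)    # a typed message instance, or base itself if the ID is unknown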
swift-nav/libsbp
python/sbp/client/handler.py
Handler._recv_thread
python
def _recv_thread(self):
    """
    Internal thread to iterate over source messages and dispatch callbacks.
    """
    for msg, metadata in self._source:
        if msg.msg_type:
            self._call(msg, **metadata)
    # Break any upstream iterators
    for sink in self._sinks:
        i = sink()
        if i is not None:
            i.breakiter()
    self._dead = True
Internal thread to iterate over source messages and dispatch callbacks.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L52-L64
swift-nav/libsbp
python/sbp/client/handler.py
Handler.filter
python
def filter(self, msg_type=None, maxsize=0):
    """
    Get a filtered iterator of messages for synchronous, blocking use in
    another thread.
    """
    if self._dead:
        return iter(())
    iterator = Handler._SBPQueueIterator(maxsize)
    # We use a weakref so that the iterator may be garbage collected if its
    # consumer no longer has a reference.
    ref = weakref.ref(iterator)
    self._sinks.append(ref)

    def feediter(msg, **metadata):
        i = ref()
        if i is not None:
            i(msg, **metadata)
        else:
            raise Handler._DeadCallbackException

    self.add_callback(feediter, msg_type)
    return iterator
Get a filtered iterator of messages for synchronous, blocking use in another thread.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L79-L100
swift-nav/libsbp
python/sbp/client/handler.py
Handler.add_callback
python
def add_callback(self, callback, msg_type=None):
    """
    Add per message type or global callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to register callback against. Default `None` means
        global callback. Iterable type adds the callback to all the message
        types.
    """
    cb_keys = self._to_iter(msg_type)
    if cb_keys is not None:
        for msg_type_ in cb_keys:
            self._callbacks[msg_type_].add(callback)
    else:
        self._callbacks[msg_type].add(callback)
Add per message type or global callback. Parameters ---------- callback : fn Callback function msg_type : int | iterable Message type to register callback against. Default `None` means global callback. Iterable type adds the callback to all the message types.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L115-L132
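A minimal registration sketch, assuming an existing Handler instance handler and a message ID constant such as SBP_MSG_POS_LLH; callbacks receive the message plus the source metadata as keyword arguments (see _call below):

def on_pos(msg, **metadata):
    print(msg.msg_type, metadata)

handler.add_callback(on_pos, SBP_MSG_POS_LLH)   # per message type
handler.add_callback(on_pos)                    # or global (msg_type=None)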
swift-nav/libsbp
python/sbp/client/handler.py
Handler.remove_callback
python
def remove_callback(self, callback, msg_type=None):
    """
    Remove per message type or global callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to remove callback from. Default `None` means global
        callback. Iterable type removes the callback from all the message
        types.
    """
    if msg_type is None:
        msg_type = self._callbacks.keys()
    cb_keys = self._to_iter(msg_type)
    if cb_keys is not None:
        for msg_type_ in cb_keys:
            try:
                self._callbacks[msg_type_].remove(callback)
            except KeyError:
                pass
    else:
        self._callbacks[msg_type].remove(callback)
Remove per message type or global callback. Parameters ---------- callback : fn Callback function msg_type : int | iterable Message type to remove callback from. Default `None` means global callback. Iterable type removes the callback from all the message types.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L134-L156
swift-nav/libsbp
python/sbp/client/handler.py
Handler._gc_dead_sinks
python
def _gc_dead_sinks(self):
    """
    Remove any dead weakrefs.
    """
    deadsinks = []
    for i in self._sinks:
        if i() is None:
            deadsinks.append(i)
    for i in deadsinks:
        self._sinks.remove(i)
Remove any dead weakrefs.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L158-L167
swift-nav/libsbp
python/sbp/client/handler.py
Handler._call
python
def _call(self, msg, **metadata):
    """
    Process message with all callbacks (global and per message type).
    """
    if msg.msg_type:
        for callback in self._get_callbacks(msg.msg_type):
            try:
                callback(msg, **metadata)
            except Handler._DeadCallbackException:
                # The callback was an upstream iterator that has been garbage
                # collected. Remove it from our internal structures.
                self.remove_callback(callback)
                self._gc_dead_sinks()
            except SystemExit:
                raise
            except:
                import traceback
                traceback.print_exc()
Process message with all callbacks (global and per message type).
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L180-L197
swift-nav/libsbp
python/sbp/client/handler.py
Handler.wait
python
def wait(self, msg_type, timeout=1.0):
    """
    Wait for a SBP message.

    Parameters
    ----------
    msg_type : int
        SBP message type.
    timeout : float
        Waiting period
    """
    event = threading.Event()
    payload = {'data': None}

    def cb(sbp_msg, **metadata):
        payload['data'] = sbp_msg
        event.set()

    self.add_callback(cb, msg_type)
    event.wait(timeout)
    self.remove_callback(cb, msg_type)
    return payload['data']
Wait for a SBP message. Parameters ---------- msg_type : int SBP message type. timeout : float Waiting period
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L221-L242
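A usage sketch for the blocking helper, again assuming an existing handler and the SBP_MSG_POS_LLH constant; it returns the first matching message, or None if the timeout expires:

pos = handler.wait(SBP_MSG_POS_LLH, timeout=5.0)
if pos is None:
    print("no position message within 5 s")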
swift-nav/libsbp
python/sbp/client/handler.py
Handler.wait_callback
python
def wait_callback(self, callback, msg_type=None, timeout=1.0):
    """
    Wait for a SBP message with a callback.

    Parameters
    ----------
    callback : fn
        Callback function
    msg_type : int | iterable
        Message type to register callback against. Default `None` means
        global callback. Iterable type adds the callback to all the message
        types.
    timeout : float
        Waiting period
    """
    event = threading.Event()

    def cb(msg, **metadata):
        callback(msg, **metadata)
        event.set()

    self.add_callback(cb, msg_type)
    event.wait(timeout)
    self.remove_callback(cb, msg_type)
Wait for a SBP message with a callback. Parameters ---------- callback : fn Callback function msg_type : int | iterable Message type to register callback against. Default `None` means global callback. Iterable type adds the callback to all the message types. timeout : float Waiting period
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/handler.py#L244-L266
swift-nav/libsbp
python/sbp/client/drivers/cdc_driver.py
CdcDriver.read
python
def read(self, size):
    """
    Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read.
    """
    try:
        return_val = self.handle.read(size)
        if return_val == '':
            print()
            print("Piksi disconnected")
            print()
            raise IOError
        return return_val
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
Read wrapper. Parameters ---------- size : int Number of bytes to read.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/cdc_driver.py#L27-L48
swift-nav/libsbp
python/sbp/client/drivers/cdc_driver.py
CdcDriver.write
python
def write(self, s):
    """
    Write wrapper.

    Parameters
    ----------
    s : bytes
        Bytes to write
    """
    try:
        return self.handle.write(s)
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
Write wrapper. Parameters ---------- s : bytes Bytes to write
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/cdc_driver.py#L50-L65
swift-nav/libsbp
generator/sbpg/utils.py
fmt_repr
python
def fmt_repr(obj):
    """
    Return pretty printed string representation of an object.
    """
    items = {k: v for k, v in list(obj.__dict__.items())}
    return "<%s: {%s}>" % (obj.__class__.__name__, pprint.pformat(items, width=1))
Return pretty printed string representation of an object.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/utils.py#L14-L20
swift-nav/libsbp
python/sbp/client/util/settingmonitor.py
SettingMonitor.capture_setting
python
def capture_setting(self, sbp_msg, **metadata):
    """Callback to extract and store setting values from
    SBP_MSG_SETTINGS_READ_RESP

    Messages of any type other than SBP_MSG_SETTINGS_READ_RESP are ignored
    """
    if sbp_msg.msg_type == SBP_MSG_SETTINGS_READ_RESP:
        section, setting, value = sbp_msg.payload.split(b'\0')[:3]
        self.settings.append((section, setting, value))
Callback to extract and store setting values from SBP_MSG_SETTINGS_READ_RESP Messages of any type other than SBP_MSG_SETTINGS_READ_RESP are ignored
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/util/settingmonitor.py#L32-L40
swift-nav/libsbp
python/sbp/client/util/settingmonitor.py
SettingMonitor.wait_for_setting_value
python
def wait_for_setting_value(self, section, setting, value, wait_time=5.0):
    """Function to wait wait_time seconds to see a SBP_MSG_SETTINGS_READ_RESP
    message with a user-specified value
    """
    expire = time.time() + wait_time
    ok = False
    while not ok and time.time() < expire:
        settings = [x for x in self.settings if (x[0], x[1]) == (section, setting)]
        # Check to see if the last setting has the value we want
        if len(settings) > 0:
            ok = settings[-1][2] == value
        time.sleep(0.1)
    return ok
Function to wait wait_time seconds to see a SBP_MSG_SETTINGS_READ_RESP message with a user-specified value
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/util/settingmonitor.py#L42-L55
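A sketch of wiring the monitor into a handler; the SettingMonitor constructor arguments (if any) are assumed here, and the comparison values are byte strings because capture_setting stores the raw payload fields split on NUL:

monitor = SettingMonitor()
handler.add_callback(monitor.capture_setting, SBP_MSG_SETTINGS_READ_RESP)
ok = monitor.wait_for_setting_value(b"simulator", b"enabled", b"True", wait_time=5.0)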
swift-nav/libsbp
python/sbp/client/util/settingmonitor.py
SettingMonitor.clear
python
def clear(self, section=None, setting=None, value=None):
    """Clear settings"""
    match = [all((section is None or x_y_z[0] == section,
                  setting is None or x_y_z[1] == setting,
                  value is None or x_y_z[2] == value))
             for x_y_z in self.settings]
    keep = [setting_remove for setting_remove in zip(self.settings, match)
            if not setting_remove[1]]
    self.settings[:] = [x[0] for x in keep]
Clear settings
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/util/settingmonitor.py#L57-L65
swift-nav/libsbp
generator/sbpg/targets/python.py
construct_format
python
def construct_format(f, type_map=CONSTRUCT_CODE):
    """
    Formats for Construct.
    """
    formatted = ""
    if type_map.get(f.type_id, None):
        return "'{identifier}' / {type_id}".format(type_id=type_map.get(f.type_id),
                                                   identifier=f.identifier)
    elif f.type_id == 'string' and f.options.get('size', None):
        return "'{id}'/ construct.Bytes({size})".format(id=f.identifier,
                                                        size=f.options['size'].value)
    elif f.type_id == 'string':
        return "'{id}' / construct.GreedyBytes".format(id=f.identifier)
    elif f.type_id == 'array' and f.options.get('size', None):
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        s = f.options.get('size', None).value
        return "'{id}' / construct.Array({size}, {type})".format(
            id=f.identifier, size=s, type=type_map.get(f_.type_id, 'construct.Byte'))
    elif f.type_id == 'array':
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        return "construct.GreedyRange(%s)" % construct_format(f_)
    else:
        return "'%s' / construct.Struct(%s._parser)" % (f.identifier, f.type_id)
    return formatted
Formats for Construct.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/python.py#L57-L83
swift-nav/libsbp
generator/sbpg/targets/python.py
render_source
python
def render_source(output_dir, package_spec, jenv=JENV):
    """
    Render and output
    """
    path, name = package_spec.filepath
    directory = output_dir
    destination_filename = "%s/%s.py" % (directory, name)
    py_template = jenv.get_template(TEMPLATE_NAME)
    module_path = ".".join(package_spec.identifier.split(".")[1:-1])
    includes = [".".join(i.split(".")[:-1]) for i in package_spec.includes]
    includes = [i for i in includes if i != "types"]
    print(destination_filename, includes)
    with open(destination_filename, 'w') as f:
        f.write(py_template.render(msgs=package_spec.definitions,
                                   filepath="/".join(package_spec.filepath) + ".yaml",
                                   module_path=module_path,
                                   include=includes,
                                   timestamp=package_spec.creation_timestamp,
                                   description=package_spec.description))
Render and output
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/python.py#L105-L123
swift-nav/libsbp
python/sbp/client/drivers/network_drivers.py
TCPDriver.read
python
def read(self, size):
    """
    Read wrapper.

    Parameters
    ----------
    size : int
        Number of bytes to read
    """
    data = None
    while True:
        try:
            data = self.handle.recv(size)
        except socket.timeout as socket_error:
            self._reconnect(socket_error)
        except socket.error as socket_error:
            # this is fine, just retry
            if socket_error.errno == errno.EINTR:
                continue
            self._reconnect(IOError)
        if not data:
            self._reconnect(IOError)
        break
    return data
Read wrapper. Parameters ---------- size : int Number of bytes to read
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/network_drivers.py#L81-L104
swift-nav/libsbp
python/sbp/client/drivers/network_drivers.py
TCPDriver.write
def write(self, s): """ Write wrapper. Parameters ---------- s : bytes Bytes to write """ try: self._write_lock.acquire() self.handle.sendall(s) except socket.timeout: self._connect() except socket.error: raise IOError finally: self._write_lock.release()
python
def write(self, s): """ Write wrapper. Parameters ---------- s : bytes Bytes to write """ try: self._write_lock.acquire() self.handle.sendall(s) except socket.timeout: self._connect() except socket.error: raise IOError finally: self._write_lock.release()
Write wrapper. Parameters ---------- s : bytes Bytes to write
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/network_drivers.py#L109-L126
swift-nav/libsbp
python/sbp/utils.py
exclude_fields
def exclude_fields(obj, exclude=EXCLUDE):
    """
    Return dict of object without parent attrs.
    """
    return dict([(k, getattr(obj, k)) for k in obj.__slots__ if k not in exclude])
python
def exclude_fields(obj, exclude=EXCLUDE):
    """
    Return dict of object without parent attrs.
    """
    return dict([(k, getattr(obj, k)) for k in obj.__slots__ if k not in exclude])
Return dict of object without parent attrs.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/utils.py#L21-L25
swift-nav/libsbp
python/sbp/utils.py
walk_json_dict
def walk_json_dict(coll):
    """
    Flatten a parsed SBP object into dicts and lists that are
    compatible with JSON output.

    Parameters
    ----------
    coll : dict
    """
    if isinstance(coll, dict):
        return dict((k, walk_json_dict(v)) for (k, v) in iter(coll.items()))
    elif isinstance(coll, bytes):
        return coll.decode('ascii')
    elif hasattr(coll, '__iter__') and not isinstance(coll, str):
        return [walk_json_dict(seq) for seq in coll]
    else:
        return coll
python
def walk_json_dict(coll):
    """
    Flatten a parsed SBP object into dicts and lists that are
    compatible with JSON output.

    Parameters
    ----------
    coll : dict
    """
    if isinstance(coll, dict):
        return dict((k, walk_json_dict(v)) for (k, v) in iter(coll.items()))
    elif isinstance(coll, bytes):
        return coll.decode('ascii')
    elif hasattr(coll, '__iter__') and not isinstance(coll, str):
        return [walk_json_dict(seq) for seq in coll]
    else:
        return coll
Flatten a parsed SBP object into dicts and lists that are compatible with JSON output. Parameters ---------- coll : dict
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/utils.py#L28-L45
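A short usage sketch of walk_json_dict as defined above: bytes become ASCII strings and nested iterables become lists, so the result can be passed straight to json.dumps. The sample message dict is invented for illustration.

import json

def walk_json_dict(coll):
    # Copied from the record above.
    if isinstance(coll, dict):
        return dict((k, walk_json_dict(v)) for (k, v) in iter(coll.items()))
    elif isinstance(coll, bytes):
        return coll.decode('ascii')
    elif hasattr(coll, '__iter__') and not isinstance(coll, str):
        return [walk_json_dict(seq) for seq in coll]
    else:
        return coll

msg = {'crc': 1234, 'payload': b'\x01\x02'.hex().encode('ascii'), 'obs': ({'sat': 3}, {'sat': 7})}
print(json.dumps(walk_json_dict(msg)))
# {"crc": 1234, "payload": "0102", "obs": [{"sat": 3}, {"sat": 7}]}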
swift-nav/libsbp
python/sbp/utils.py
containerize
def containerize(coll): """Walk attribute fields passed from an SBP message and convert to Containers where appropriate. Needed for Construct proper serialization. Parameters ---------- coll : dict """ if isinstance(coll, Container): [setattr(coll, k, containerize(v)) for (k, v) in coll.items()] return coll elif isinstance(coll, dict): return containerize(Container(**coll)) elif isinstance(coll, list): for j, i in enumerate(coll): if isinstance(i, dict): coll[j] = containerize(Container(**i)) return coll else: return coll
python
def containerize(coll): """Walk attribute fields passed from an SBP message and convert to Containers where appropriate. Needed for Construct proper serialization. Parameters ---------- coll : dict """ if isinstance(coll, Container): [setattr(coll, k, containerize(v)) for (k, v) in coll.items()] return coll elif isinstance(coll, dict): return containerize(Container(**coll)) elif isinstance(coll, list): for j, i in enumerate(coll): if isinstance(i, dict): coll[j] = containerize(Container(**i)) return coll else: return coll
Walk attribute fields passed from an SBP message and convert to Containers where appropriate. Needed for Construct proper serialization. Parameters ---------- coll : dict
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/utils.py#L48-L69
swift-nav/libsbp
python/sbp/utils.py
fmt_repr
def fmt_repr(obj):
    """Print an orphaned string representation of an object without the
    clutter of its parent object.
    """
    items = ["%s = %r" % (k, v) for k, v in list(exclude_fields(obj).items())]
    return "<%s: {%s}>" % (obj.__class__.__name__, ', '.join(items))
python
def fmt_repr(obj):
    """Print an orphaned string representation of an object without the
    clutter of its parent object.
    """
    items = ["%s = %r" % (k, v) for k, v in list(exclude_fields(obj).items())]
    return "<%s: {%s}>" % (obj.__class__.__name__, ', '.join(items))
Print an orphaned string representation of an object without the clutter of its parent object.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/utils.py#L72-L78
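A self-contained sketch showing how the exclude_fields and fmt_repr entries above cooperate on a __slots__-based message class. The MsgExample class is invented, and ('sender', 'crc') stands in for the module's EXCLUDE constant, which is not shown in these records.

def exclude_fields(obj, exclude=('sender', 'crc')):
    # Same logic as the record above; ('sender', 'crc') is a stand-in for EXCLUDE.
    return dict([(k, getattr(obj, k)) for k in obj.__slots__ if k not in exclude])

def fmt_repr(obj):
    items = ["%s = %r" % (k, v) for k, v in list(exclude_fields(obj).items())]
    return "<%s: {%s}>" % (obj.__class__.__name__, ', '.join(items))

class MsgExample(object):
    __slots__ = ['sender', 'crc', 'tow', 'flags']
    def __init__(self):
        self.sender = 0x42
        self.crc = 0xBEEF
        self.tow = 1000
        self.flags = 1

print(fmt_repr(MsgExample()))
# <MsgExample: {tow = 1000, flags = 1}>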
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
read_spec
def read_spec(filename, verbose=False): """ Read an SBP specification. Parameters ---------- filename : str Local filename for specification. verbose : bool Print out some debugging info Returns ---------- Raises ---------- Exception On empty file. yaml.YAMLError On Yaml parsing error voluptuous.Invalid On invalid SBP schema """ contents = None with open(filename, 'r') as f: contents = yaml.load(f) if contents is None: raise Exception("Empty yaml file: %s." % filename) try: s.package_schema(contents) except Exception as e: sys.stderr.write("Invalid SBP YAML specification: %s.\n" % filename) raise e return contents
python
def read_spec(filename, verbose=False): """ Read an SBP specification. Parameters ---------- filename : str Local filename for specification. verbose : bool Print out some debugging info Returns ---------- Raises ---------- Exception On empty file. yaml.YAMLError On Yaml parsing error voluptuous.Invalid On invalid SBP schema """ contents = None with open(filename, 'r') as f: contents = yaml.load(f) if contents is None: raise Exception("Empty yaml file: %s." % filename) try: s.package_schema(contents) except Exception as e: sys.stderr.write("Invalid SBP YAML specification: %s.\n" % filename) raise e return contents
Read an SBP specification. Parameters ---------- filename : str Local filename for specification. verbose : bool Print out some debugging info Returns ---------- Raises ---------- Exception On empty file. yaml.YAMLError On Yaml parsing error voluptuous.Invalid On invalid SBP schema
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L32-L65
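A hedged sketch of the same read-then-validate flow, assuming PyYAML is installed. yaml.safe_load is used here in place of the bare yaml.load call above (which warns on recent PyYAML versions when no Loader is given), and the schema check is reduced to a simple key assertion since the voluptuous package_schema is not shown in this record. The spec text is invented.

import yaml

SPEC = """
package: swiftnav.sbp.example
description: Example package
definitions:
  - MSG_EXAMPLE:
      id: 0x0042
      fields: []
"""

contents = yaml.safe_load(SPEC)
if contents is None:
    raise Exception("Empty yaml spec.")
# Stand-in for s.package_schema(contents):
assert 'package' in contents and 'definitions' in contents
print(contents['package'], len(contents['definitions']))
# swiftnav.sbp.example 1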
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
get_files
def get_files(input_file): """ Initializes an index of files to generate, returns the base directory and index. """ file_index = {} base_dir = None if os.path.isfile(input_file): file_index[input_file] = None base_dir = os.path.dirname(input_file) elif os.path.isdir(input_file): base_dir = input_file for inf in glob.glob(input_file + s.SBP_EXTENSION): file_index[os.path.abspath(inf)] = None for inf in glob.glob(input_file + '/*'): base, index = get_files(os.path.abspath(inf)) z = file_index.copy() z.update(index) file_index = z return (base_dir, file_index)
python
def get_files(input_file): """ Initializes an index of files to generate, returns the base directory and index. """ file_index = {} base_dir = None if os.path.isfile(input_file): file_index[input_file] = None base_dir = os.path.dirname(input_file) elif os.path.isdir(input_file): base_dir = input_file for inf in glob.glob(input_file + s.SBP_EXTENSION): file_index[os.path.abspath(inf)] = None for inf in glob.glob(input_file + '/*'): base, index = get_files(os.path.abspath(inf)) z = file_index.copy() z.update(index) file_index = z return (base_dir, file_index)
Initializes an index of files to generate, returns the base directory and index.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L102-L122
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
resolve_deps
def resolve_deps(base_dir, file_index): """ Given a base directory and an initial set of files, retrieves dependencies and adds them to the file_index. """ def flatten(tree, index = {}): for include in tree.get('include', []): fname = base_dir + "/" + include assert os.path.exists(fname), "File %s does not exist." % fname if fname not in index: index[fname] = read_spec(fname) index.update(flatten(index[fname], file_index)) return index for fname, contents in file_index.items(): file_index[fname] = read_spec(fname) file_index.update(flatten(file_index[fname], file_index)) return file_index
python
def resolve_deps(base_dir, file_index): """ Given a base directory and an initial set of files, retrieves dependencies and adds them to the file_index. """ def flatten(tree, index = {}): for include in tree.get('include', []): fname = base_dir + "/" + include assert os.path.exists(fname), "File %s does not exist." % fname if fname not in index: index[fname] = read_spec(fname) index.update(flatten(index[fname], file_index)) return index for fname, contents in file_index.items(): file_index[fname] = read_spec(fname) file_index.update(flatten(file_index[fname], file_index)) return file_index
Given a base directory and an initial set of files, retrieves dependencies and adds them to the file_index.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L146-L163
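One detail worth flagging in the inner flatten helper above: index = {} is a mutable default argument, so the same dict persists across calls that omit the argument. The snippet below demonstrates the general pitfall in isolation and the conventional fix; it does not reproduce resolve_deps itself.

def flatten(item, index={}):          # mutable default: shared across calls
    index[item] = True
    return index

print(flatten('a'))   # {'a': True}
print(flatten('b'))   # {'a': True, 'b': True}  -- 'a' leaked in from the first call

def flatten_safe(item, index=None):   # conventional fix
    index = {} if index is None else index
    index[item] = True
    return index

print(flatten_safe('a'))  # {'a': True}
print(flatten_safe('b'))  # {'b': True}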
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
mk_package
def mk_package(contents): """Instantiates a package specification from a parsed "AST" of a package. Parameters ---------- contents : dict Returns ---------- PackageSpecification """ package = contents.get('package', None) description = contents.get('description', None) include = contents.get('include', []) definitions = contents.get('definitions', []) resolved = [mk_definition(defn) for defn in definitions] return sbp.PackageSpecification(identifier=package, description=description, includes=include, definitions=resolved, render_source=contents.get('render_source', True), stable=contents.get('stable', False), public=contents.get('public', True))
python
def mk_package(contents): """Instantiates a package specification from a parsed "AST" of a package. Parameters ---------- contents : dict Returns ---------- PackageSpecification """ package = contents.get('package', None) description = contents.get('description', None) include = contents.get('include', []) definitions = contents.get('definitions', []) resolved = [mk_definition(defn) for defn in definitions] return sbp.PackageSpecification(identifier=package, description=description, includes=include, definitions=resolved, render_source=contents.get('render_source', True), stable=contents.get('stable', False), public=contents.get('public', True))
Instantiates a package specification from a parsed "AST" of a package. Parameters ---------- contents : dict Returns ---------- PackageSpecification
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L216-L240
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
mk_definition
def mk_definition(defn): """Instantiates a struct or SBP message specification from a parsed "AST" of a struct or message. Parameters ---------- defn : dict Returns ---------- A Definition or a specialization of a definition, like a Struct """ assert len(defn) == 1 identifier, contents = next(iter(defn.items())) fs = [mk_field(f) for f in contents.get('fields', [])] return sbp.resolve_type(sbp.Definition(identifier=identifier, sbp_id=contents.get('id', None), short_desc=contents.get('short_desc', None), desc=contents.get('desc', None), type_id=contents.get('type'), fields=fs, public=contents.get('public', True)))
python
def mk_definition(defn): """Instantiates a struct or SBP message specification from a parsed "AST" of a struct or message. Parameters ---------- defn : dict Returns ---------- A Definition or a specialization of a definition, like a Struct """ assert len(defn) == 1 identifier, contents = next(iter(defn.items())) fs = [mk_field(f) for f in contents.get('fields', [])] return sbp.resolve_type(sbp.Definition(identifier=identifier, sbp_id=contents.get('id', None), short_desc=contents.get('short_desc', None), desc=contents.get('desc', None), type_id=contents.get('type'), fields=fs, public=contents.get('public', True)))
Instantiates a struct or SBP message specification from a parsed "AST" of a struct or message. Parameters ---------- defn : dict Returns ---------- A Definition or a specialization of a definition, like a Struct
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L241-L263
swift-nav/libsbp
generator/sbpg/specs/yaml2.py
mk_field
def mk_field(field): """Instantiates a field specification from a parsed "AST" of a field. Parameters ---------- field : dict Returns ---------- A Field or a specialization of a field, like a bitfield. """ assert len(field) == 1 identifier, contents = next(iter(field.items())) contents = dict(list({'units': '', 'n_with_values': 0}.items()) + list(contents.items())) return sbp.resolve_type(sbp.Field(identifier=identifier, type_id=contents.pop('type'), options=contents))
python
def mk_field(field): """Instantiates a field specification from a parsed "AST" of a field. Parameters ---------- field : dict Returns ---------- A Field or a specialization of a field, like a bitfield. """ assert len(field) == 1 identifier, contents = next(iter(field.items())) contents = dict(list({'units': '', 'n_with_values': 0}.items()) + list(contents.items())) return sbp.resolve_type(sbp.Field(identifier=identifier, type_id=contents.pop('type'), options=contents))
Instantiates a field specification from a parsed "AST" of a field. Parameters ---------- field : dict Returns ---------- A Field or a specialization of a field, like a bitfield.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/specs/yaml2.py#L265-L283
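The dict(list({...}.items()) + list(contents.items())) expression in mk_field above merges default field options with the parsed YAML contents, with the parsed values winning. A small sketch of that merge on an invented field entry, plus the equivalent modern spelling.

defaults = {'units': '', 'n_with_values': 0}
contents = {'type': 'u8', 'units': 'm'}

merged = dict(list(defaults.items()) + list(contents.items()))  # as in mk_field
print(merged)
# {'units': 'm', 'n_with_values': 0, 'type': 'u8'}

merged_modern = {**defaults, **contents}                        # same result on Python 3.5+
print(merged_modern == merged)
# True

field_type = merged.pop('type')   # mk_field pops 'type' and keeps the rest as options
print(field_type, merged)
# u8 {'units': 'm', 'n_with_values': 0}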
swift-nav/libsbp
generator/sbpg/targets/haskell.py
to_global
def to_global(s):
    """
    Format a global variable name.
    """
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        s = "".join([i.capitalize() for i in s.split("_")])
    return s[0].lower() + s[1:]
python
def to_global(s):
    """
    Format a global variable name.
    """
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        s = "".join([i.capitalize() for i in s.split("_")])
    return s[0].lower() + s[1:]
Format a global variable name.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/haskell.py#L73-L82
swift-nav/libsbp
generator/sbpg/targets/haskell.py
to_data
def to_data(s):
    """
    Format a data variable name.
    """
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        return "".join([i.capitalize() for i in s.split("_")])
    return s
python
def to_data(s):
    """
    Format a data variable name.
    """
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        return "".join([i.capitalize() for i in s.split("_")])
    return s
Format a data variable name.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/haskell.py#L84-L93
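A quick demonstration of the two Haskell name formatters above; the functions are copied from the records and the outputs follow directly from their string logic. The sample names are invented.

def to_global(s):
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        s = "".join([i.capitalize() for i in s.split("_")])
    return s[0].lower() + s[1:]

def to_data(s):
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        return "".join([i.capitalize() for i in s.split("_")])
    return s

print(to_global('msg_pos_llh'))   # msgPosLlh
print(to_global('GPSTimeNano'))   # gpsTimeNano
print(to_data('msg_pos_llh'))     # MsgPosLlh
print(to_data('GPSTimeNano'))     # GpsTimeNano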
swift-nav/libsbp
generator/sbpg/targets/haskell.py
to_type
def to_type(f, type_map=CONSTRUCT_CODE):
    """
    Format the proper type.
    """
    name = f.type_id
    if name.startswith('GPSTime'):
        name = 'Gps' + name[3:]
    if type_map.get(name, None):
        return type_map.get(name, None)
    elif name == 'array':
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        return "[%s]" % to_type(f_)
    return name
python
def to_type(f, type_map=CONSTRUCT_CODE):
    """
    Format the proper type.
    """
    name = f.type_id
    if name.startswith('GPSTime'):
        name = 'Gps' + name[3:]
    if type_map.get(name, None):
        return type_map.get(name, None)
    elif name == 'array':
        fill = f.options['fill'].value
        f_ = copy.copy(f)
        f_.type_id = fill
        return "[%s]" % to_type(f_)
    return name
Format the proper type.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/haskell.py#L95-L109
swift-nav/libsbp
generator/sbpg/targets/haskell.py
render_source
def render_source(output_dir, package_spec): """ Render and output to a directory given a package specification. """ path, name = package_spec.filepath module_prefix = "SwiftNav.SBP" module_name = camel_case(name) full_module_name = ".".join([module_prefix, module_name]) destination_filename = "%s/src/SwiftNav/SBP/%s.hs" % (output_dir, module_name) py_template = JENV.get_template(MESSAGES_TEMPLATE_NAME) module_includes = [".".join([module_prefix] + [camel_case(j) for j in i.split(".")[:-1]]) for i in package_spec.includes] with open(destination_filename, 'w') as f: f.write(py_template.render(msgs=package_spec.definitions, description=package_spec.description, module_name=full_module_name, module_includes=module_includes))
python
def render_source(output_dir, package_spec): """ Render and output to a directory given a package specification. """ path, name = package_spec.filepath module_prefix = "SwiftNav.SBP" module_name = camel_case(name) full_module_name = ".".join([module_prefix, module_name]) destination_filename = "%s/src/SwiftNav/SBP/%s.hs" % (output_dir, module_name) py_template = JENV.get_template(MESSAGES_TEMPLATE_NAME) module_includes = [".".join([module_prefix] + [camel_case(j) for j in i.split(".")[:-1]]) for i in package_spec.includes] with open(destination_filename, 'w') as f: f.write(py_template.render(msgs=package_spec.definitions, description=package_spec.description, module_name=full_module_name, module_includes=module_includes))
Render and output to a directory given a package specification.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/haskell.py#L164-L181
swift-nav/libsbp
generator/sbpg/targets/protobuf.py
to_comment
def to_comment(value):
    """
    Builds a comment.
    """
    if value is None:
        return
    if len(value.split('\n')) == 1:
        return "* " + value
    else:
        return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
python
def to_comment(value):
    """
    Builds a comment.
    """
    if value is None:
        return
    if len(value.split('\n')) == 1:
        return "* " + value
    else:
        return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
Builds a comment.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/protobuf.py#L39-L48
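A short check of the comment builder above. Note that the multi-line branch drops the last element produced by split('\n'), so it expects the value to end with a newline; the invented description below respects that.

def to_comment(value):
    if value is None:
        return
    if len(value.split('\n')) == 1:
        return "* " + value
    else:
        return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])

print(to_comment("Single-line description"))
# * Single-line description

print(to_comment("GPS time in weeks.\nRange: 0-1023.\n"))
#  * GPS time in weeks.
#  * Range: 0-1023.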
swift-nav/libsbp
generator/sbpg/targets/protobuf.py
to_identifier
def to_identifier(s):
    """
    Convert snake_case to camel_case.
    """
    if s.startswith('GPS'):
        s = 'Gps' + s[3:]
    return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s
python
def to_identifier(s):
    """
    Convert snake_case to camel_case.
    """
    if s.startswith('GPS'):
        s = 'Gps' + s[3:]
    return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s
Convert snake_case to camel_case.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/protobuf.py#L50-L56
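The protobuf variant above only rewrites a leading 'GPS' and otherwise returns names without underscores unchanged; a few illustrative calls on invented names:

def to_identifier(s):
    if s.startswith('GPS'):
        s = 'Gps' + s[3:]
    return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s

print(to_identifier('msg_obs'))       # MsgObs
print(to_identifier('GPS_TIME'))      # GpsTime
print(to_identifier('GPSTimeNano'))   # GpsTimeNano
print(to_identifier('ephemeris'))     # ephemeris (no underscore, returned as-is)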
swift-nav/libsbp
generator/sbpg/targets/protobuf.py
render_source
def render_source(output_dir, package_spec): """ Render and output to a directory given a package specification. """ path, name = package_spec.filepath destination_filename = '%s/%s.proto' % (output_dir, name) pb_template = JENV.get_template(MESSAGES_TEMPLATE_NAME) includes = [include[:-5] if include.endswith('.yaml') else include for include in package_spec.includes] if 'types' in includes: includes.remove('types') with open(destination_filename, 'w') as f: f.write(pb_template.render( name=name, package=package_spec.identifier, messages=package_spec.definitions, includes=includes, description=package_spec.description, ))
python
def render_source(output_dir, package_spec): """ Render and output to a directory given a package specification. """ path, name = package_spec.filepath destination_filename = '%s/%s.proto' % (output_dir, name) pb_template = JENV.get_template(MESSAGES_TEMPLATE_NAME) includes = [include[:-5] if include.endswith('.yaml') else include for include in package_spec.includes] if 'types' in includes: includes.remove('types') with open(destination_filename, 'w') as f: f.write(pb_template.render( name=name, package=package_spec.identifier, messages=package_spec.definitions, includes=includes, description=package_spec.description, ))
Render and output to a directory given a package specification.
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/protobuf.py#L81-L98
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapp_container_functions.py
load_app_resource
def load_app_resource(**kwargs): ''' :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_RESOURCES_ID" or "DX_PROJECT_CONTEXT_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the app resources container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. If the app resources container ID is not found in DX_RESOURCES_ID, falls back to looking in the current project. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_app_resource(name="Indexed genome", classname='file') dxpy.download_dxfile(x) ''' if 'project' in kwargs: raise DXError('Unexpected kwarg: "project"') if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_RESOURCES_ID' not in os.environ and 'DX_PROJECT_CONTEXT_ID' not in os.environ: raise DXError('App resources container ID could not be found') kwargs['project'] = os.environ.get('DX_RESOURCES_ID', os.environ.get('DX_PROJECT_CONTEXT_ID')) kwargs['return_handler'] = True return find_one_data_object(**kwargs)
python
def load_app_resource(**kwargs): ''' :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_RESOURCES_ID" or "DX_PROJECT_CONTEXT_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the app resources container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. If the app resources container ID is not found in DX_RESOURCES_ID, falls back to looking in the current project. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_app_resource(name="Indexed genome", classname='file') dxpy.download_dxfile(x) ''' if 'project' in kwargs: raise DXError('Unexpected kwarg: "project"') if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_RESOURCES_ID' not in os.environ and 'DX_PROJECT_CONTEXT_ID' not in os.environ: raise DXError('App resources container ID could not be found') kwargs['project'] = os.environ.get('DX_RESOURCES_ID', os.environ.get('DX_PROJECT_CONTEXT_ID')) kwargs['return_handler'] = True return find_one_data_object(**kwargs)
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_RESOURCES_ID" or "DX_PROJECT_CONTEXT_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the app resources container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. If the app resources container ID is not found in DX_RESOURCES_ID, falls back to looking in the current project. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_app_resource(name="Indexed genome", classname='file') dxpy.download_dxfile(x)
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapp_container_functions.py#L34-L62
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapp_container_functions.py
load_from_cache
def load_from_cache(**kwargs): ''' :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the project cache container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x) ''' if 'project' in kwargs: raise DXError('Unexpected kwarg: "project"') if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_PROJECT_CACHE_ID' not in os.environ: raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID') kwargs['project'] = os.environ.get('DX_PROJECT_CACHE_ID') kwargs['return_handler'] = True cached_object = find_one_data_object(**kwargs) if cached_object is None: return None return cached_object.clone(dxpy.WORKSPACE_ID)
python
def load_from_cache(**kwargs): ''' :param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the project cache container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x) ''' if 'project' in kwargs: raise DXError('Unexpected kwarg: "project"') if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_PROJECT_CACHE_ID' not in os.environ: raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID') kwargs['project'] = os.environ.get('DX_PROJECT_CACHE_ID') kwargs['return_handler'] = True cached_object = find_one_data_object(**kwargs) if cached_object is None: return None return cached_object.clone(dxpy.WORKSPACE_ID)
:param kwargs: keyword args for :func:`~dxpy.bindings.search.find_one_data_object`, with the exception of "project" :raises: :exc:`~dxpy.exceptions.DXError` if "project" is given, if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables :returns: None if no matching object is found; otherwise returns a dxpy object handler for that class of object Searches for a data object in the project cache container matching the given keyword arguments. If found, the object will be cloned into the running job's workspace container, and the handler for it will be returned. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x)
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapp_container_functions.py#L64-L100
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapp_container_functions.py
save_to_cache
def save_to_cache(dxobject): ''' :param dxobject: a dxpy object handler for an object to save to the cache :raises: :exc:`~dxpy.exceptions.DXError` if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables Clones the given object to the project cache. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x) ''' if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_PROJECT_CACHE_ID' not in os.environ: raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID') dxobject.clone(os.environ.get('DX_PROJECT_CACHE_ID'))
python
def save_to_cache(dxobject): ''' :param dxobject: a dxpy object handler for an object to save to the cache :raises: :exc:`~dxpy.exceptions.DXError` if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables Clones the given object to the project cache. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x) ''' if dxpy.JOB_ID is None: raise DXError('Not called by a job') if 'DX_PROJECT_CACHE_ID' not in os.environ: raise DXError('Project cache ID could not be found in the environment variable DX_PROJECT_CACHE_ID') dxobject.clone(os.environ.get('DX_PROJECT_CACHE_ID'))
:param dxobject: a dxpy object handler for an object to save to the cache :raises: :exc:`~dxpy.exceptions.DXError` if this is called with dxpy.JOB_ID not set, or if "DX_PROJECT_CACHE_ID" is not found in the environment variables Clones the given object to the project cache. Example:: @dxpy.entry_point('main') def main(*args, **kwargs): x = load_from_cache(name="Indexed genome", classname='file') if x is None: x = compute_result(*args) save_to_cache(x)
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapp_container_functions.py#L103-L125
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
DXExecutable._get_run_input_common_fields
def _get_run_input_common_fields(executable_input, **kwargs): ''' Takes the same arguments as the run method. Creates an input hash for the /executable-xxxx/run method, translating ONLY the fields that can be handled uniformly across all executables: project, folder, name, tags, properties, details, depends_on, allow_ssh, debug, delay_workspace_destruction, ignore_reuse, and extra_args. ''' project = kwargs.get('project') or dxpy.WORKSPACE_ID run_input = {"input": executable_input} for arg in ['folder', 'name', 'tags', 'properties', 'details']: if kwargs.get(arg) is not None: run_input[arg] = kwargs[arg] if kwargs.get('instance_type') is not None or kwargs.get('cluster_spec') is not None: instance_type_srd = SystemRequirementsDict.from_instance_type(kwargs.get('instance_type')) cluster_spec_srd = SystemRequirementsDict(kwargs.get('cluster_spec')) run_input["systemRequirements"] = (instance_type_srd + cluster_spec_srd).as_dict() if kwargs.get('depends_on') is not None: run_input["dependsOn"] = [] if isinstance(kwargs['depends_on'], list): for item in kwargs['depends_on']: if isinstance(item, DXJob) or isinstance(item, DXDataObject): if item.get_id() is None: raise DXError('A dxpy handler given in depends_on does not have an ID set') run_input["dependsOn"].append(item.get_id()) elif isinstance(item, basestring): run_input['dependsOn'].append(item) else: raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings') else: raise DXError('Expected depends_on field to be a list') if kwargs.get('delay_workspace_destruction') is not None: run_input["delayWorkspaceDestruction"] = kwargs['delay_workspace_destruction'] if kwargs.get('allow_ssh') is not None: run_input["allowSSH"] = kwargs['allow_ssh'] if kwargs.get('debug') is not None: run_input["debug"] = kwargs['debug'] if kwargs.get('priority') is not None: run_input["priority"] = kwargs['priority'] if kwargs.get('ignore_reuse') is not None: run_input["ignoreReuse"] = kwargs['ignore_reuse'] if dxpy.JOB_ID is None: run_input["project"] = project if kwargs.get('extra_args') is not None: merge(run_input, kwargs['extra_args']) return run_input
python
def _get_run_input_common_fields(executable_input, **kwargs): ''' Takes the same arguments as the run method. Creates an input hash for the /executable-xxxx/run method, translating ONLY the fields that can be handled uniformly across all executables: project, folder, name, tags, properties, details, depends_on, allow_ssh, debug, delay_workspace_destruction, ignore_reuse, and extra_args. ''' project = kwargs.get('project') or dxpy.WORKSPACE_ID run_input = {"input": executable_input} for arg in ['folder', 'name', 'tags', 'properties', 'details']: if kwargs.get(arg) is not None: run_input[arg] = kwargs[arg] if kwargs.get('instance_type') is not None or kwargs.get('cluster_spec') is not None: instance_type_srd = SystemRequirementsDict.from_instance_type(kwargs.get('instance_type')) cluster_spec_srd = SystemRequirementsDict(kwargs.get('cluster_spec')) run_input["systemRequirements"] = (instance_type_srd + cluster_spec_srd).as_dict() if kwargs.get('depends_on') is not None: run_input["dependsOn"] = [] if isinstance(kwargs['depends_on'], list): for item in kwargs['depends_on']: if isinstance(item, DXJob) or isinstance(item, DXDataObject): if item.get_id() is None: raise DXError('A dxpy handler given in depends_on does not have an ID set') run_input["dependsOn"].append(item.get_id()) elif isinstance(item, basestring): run_input['dependsOn'].append(item) else: raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings') else: raise DXError('Expected depends_on field to be a list') if kwargs.get('delay_workspace_destruction') is not None: run_input["delayWorkspaceDestruction"] = kwargs['delay_workspace_destruction'] if kwargs.get('allow_ssh') is not None: run_input["allowSSH"] = kwargs['allow_ssh'] if kwargs.get('debug') is not None: run_input["debug"] = kwargs['debug'] if kwargs.get('priority') is not None: run_input["priority"] = kwargs['priority'] if kwargs.get('ignore_reuse') is not None: run_input["ignoreReuse"] = kwargs['ignore_reuse'] if dxpy.JOB_ID is None: run_input["project"] = project if kwargs.get('extra_args') is not None: merge(run_input, kwargs['extra_args']) return run_input
Takes the same arguments as the run method. Creates an input hash for the /executable-xxxx/run method, translating ONLY the fields that can be handled uniformly across all executables: project, folder, name, tags, properties, details, depends_on, allow_ssh, debug, delay_workspace_destruction, ignore_reuse, and extra_args.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L46-L100
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
DXExecutable._get_run_input_fields_for_applet
def _get_run_input_fields_for_applet(executable_input, **kwargs): ''' Takes the same arguments as the run method. Creates an input hash for the /applet-xxxx/run method. ''' # Although it says "for_applet", this is factored out of # DXApplet because apps currently use the same mechanism for unsupported_arg in ['stage_instance_types', 'stage_folders', 'rerun_stages', 'ignore_reuse_stages']: if kwargs.get(unsupported_arg): raise DXError(unsupported_arg + ' is not supported for applets (only workflows)') return DXExecutable._get_run_input_common_fields(executable_input, **kwargs)
python
def _get_run_input_fields_for_applet(executable_input, **kwargs): ''' Takes the same arguments as the run method. Creates an input hash for the /applet-xxxx/run method. ''' # Although it says "for_applet", this is factored out of # DXApplet because apps currently use the same mechanism for unsupported_arg in ['stage_instance_types', 'stage_folders', 'rerun_stages', 'ignore_reuse_stages']: if kwargs.get(unsupported_arg): raise DXError(unsupported_arg + ' is not supported for applets (only workflows)') return DXExecutable._get_run_input_common_fields(executable_input, **kwargs)
Takes the same arguments as the run method. Creates an input hash for the /applet-xxxx/run method.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L103-L114
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
DXExecutable.run
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None, instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None, depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None, ignore_reuse=None, ignore_reuse_stages=None, extra_args=None, **kwargs): ''' :param executable_input: Hash of the executable's input arguments :type executable_input: dict :param project: Project ID of the project context :type project: string :param folder: Folder in which executable's outputs will be placed in *project* :type folder: string :param name: Name for the new job (default is "<name of the executable>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :param allow_ssh: List of hostname or IP masks to allow SSH connections from :type allow_ssh: list :param debug: Configuration options for job debugging :type debug: dict :param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails :type delay_workspace_destruction: boolean :param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high" :type priority: string :param ignore_reuse: Disable job reuse for this execution :type ignore_reuse: boolean :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled :type ignore_reuse_stages: list :param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call :type extra_args: dict :returns: Object handler of the newly created job :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates a new job that executes the function "main" of this executable with the given input *executable_input*. ''' # stage_instance_types, stage_folders, and rerun_stages are # only supported for workflows, but we include them # here. Applet-based executables should detect when they # receive a truthy workflow-specific value and raise an error. run_input = self._get_run_input(executable_input, project=project, folder=folder, name=name, tags=tags, properties=properties, details=details, instance_type=instance_type, stage_instance_types=stage_instance_types, stage_folders=stage_folders, rerun_stages=rerun_stages, cluster_spec=cluster_spec, depends_on=depends_on, allow_ssh=allow_ssh, ignore_reuse=ignore_reuse, ignore_reuse_stages=ignore_reuse_stages, debug=debug, delay_workspace_destruction=delay_workspace_destruction, priority=priority, extra_args=extra_args) return self._run_impl(run_input, **kwargs)
python
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None, instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None, depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None, ignore_reuse=None, ignore_reuse_stages=None, extra_args=None, **kwargs): ''' :param executable_input: Hash of the executable's input arguments :type executable_input: dict :param project: Project ID of the project context :type project: string :param folder: Folder in which executable's outputs will be placed in *project* :type folder: string :param name: Name for the new job (default is "<name of the executable>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :param allow_ssh: List of hostname or IP masks to allow SSH connections from :type allow_ssh: list :param debug: Configuration options for job debugging :type debug: dict :param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails :type delay_workspace_destruction: boolean :param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high" :type priority: string :param ignore_reuse: Disable job reuse for this execution :type ignore_reuse: boolean :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled :type ignore_reuse_stages: list :param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call :type extra_args: dict :returns: Object handler of the newly created job :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates a new job that executes the function "main" of this executable with the given input *executable_input*. ''' # stage_instance_types, stage_folders, and rerun_stages are # only supported for workflows, but we include them # here. Applet-based executables should detect when they # receive a truthy workflow-specific value and raise an error. run_input = self._get_run_input(executable_input, project=project, folder=folder, name=name, tags=tags, properties=properties, details=details, instance_type=instance_type, stage_instance_types=stage_instance_types, stage_folders=stage_folders, rerun_stages=rerun_stages, cluster_spec=cluster_spec, depends_on=depends_on, allow_ssh=allow_ssh, ignore_reuse=ignore_reuse, ignore_reuse_stages=ignore_reuse_stages, debug=debug, delay_workspace_destruction=delay_workspace_destruction, priority=priority, extra_args=extra_args) return self._run_impl(run_input, **kwargs)
:param executable_input: Hash of the executable's input arguments :type executable_input: dict :param project: Project ID of the project context :type project: string :param folder: Folder in which executable's outputs will be placed in *project* :type folder: string :param name: Name for the new job (default is "<name of the executable>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :param allow_ssh: List of hostname or IP masks to allow SSH connections from :type allow_ssh: list :param debug: Configuration options for job debugging :type debug: dict :param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails :type delay_workspace_destruction: boolean :param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high" :type priority: string :param ignore_reuse: Disable job reuse for this execution :type ignore_reuse: boolean :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled :type ignore_reuse_stages: list :param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call :type extra_args: dict :returns: Object handler of the newly created job :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates a new job that executes the function "main" of this executable with the given input *executable_input*.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L158-L227
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
DXApplet._new
def _new(self, dx_hash, **kwargs): ''' :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param runSpec: Run specification :type runSpec: dict :param dxapi: API version string :type dxapi: string :param inputSpec: Input specification (optional) :type inputSpec: dict :param outputSpec: Output specification (optional) :type outputSpec: dict :param access: Access specification (optional) :type access: dict :param title: Title string (optional) :type title: string :param summary: Summary string (optional) :type summary: string :param description: Description string (optional) :type description: string .. note:: It is highly recommended that the higher-level module :mod:`dxpy.app_builder` or (preferably) its frontend `dx build <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_ be used instead for applet creation. Creates an applet with the given parameters. See the API documentation for the `/applet/new <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/applet/new>`_ method for more info. The applet is not run until :meth:`run()` is called. ''' for field in 'runSpec', 'dxapi': if field not in kwargs: raise DXError("%s: Keyword argument %s is required" % (self.__class__.__name__, field)) dx_hash[field] = kwargs[field] del kwargs[field] for field in 'inputSpec', 'outputSpec', 'access', 'title', 'summary', 'description': if field in kwargs: dx_hash[field] = kwargs[field] del kwargs[field] resp = dxpy.api.applet_new(dx_hash, **kwargs) self.set_ids(resp["id"], dx_hash["project"])
python
def _new(self, dx_hash, **kwargs): ''' :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param runSpec: Run specification :type runSpec: dict :param dxapi: API version string :type dxapi: string :param inputSpec: Input specification (optional) :type inputSpec: dict :param outputSpec: Output specification (optional) :type outputSpec: dict :param access: Access specification (optional) :type access: dict :param title: Title string (optional) :type title: string :param summary: Summary string (optional) :type summary: string :param description: Description string (optional) :type description: string .. note:: It is highly recommended that the higher-level module :mod:`dxpy.app_builder` or (preferably) its frontend `dx build <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_ be used instead for applet creation. Creates an applet with the given parameters. See the API documentation for the `/applet/new <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/applet/new>`_ method for more info. The applet is not run until :meth:`run()` is called. ''' for field in 'runSpec', 'dxapi': if field not in kwargs: raise DXError("%s: Keyword argument %s is required" % (self.__class__.__name__, field)) dx_hash[field] = kwargs[field] del kwargs[field] for field in 'inputSpec', 'outputSpec', 'access', 'title', 'summary', 'description': if field in kwargs: dx_hash[field] = kwargs[field] del kwargs[field] resp = dxpy.api.applet_new(dx_hash, **kwargs) self.set_ids(resp["id"], dx_hash["project"])
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param runSpec: Run specification :type runSpec: dict :param dxapi: API version string :type dxapi: string :param inputSpec: Input specification (optional) :type inputSpec: dict :param outputSpec: Output specification (optional) :type outputSpec: dict :param access: Access specification (optional) :type access: dict :param title: Title string (optional) :type title: string :param summary: Summary string (optional) :type summary: string :param description: Description string (optional) :type description: string .. note:: It is highly recommended that the higher-level module :mod:`dxpy.app_builder` or (preferably) its frontend `dx build <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_ be used instead for applet creation. Creates an applet with the given parameters. See the API documentation for the `/applet/new <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/applet/new>`_ method for more info. The applet is not run until :meth:`run()` is called.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L307-L351
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
DXApplet.run
def run(self, applet_input, *args, **kwargs):
    """
    Creates a new job that executes the function "main" of this applet with
    the given input *applet_input*.

    See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for the available
    args.
    """
    # Rename applet_input arg to preserve API compatibility when calling
    # DXApplet.run(applet_input=...)
    return super(DXApplet, self).run(applet_input, *args, **kwargs)
python
def run(self, applet_input, *args, **kwargs):
    """
    Creates a new job that executes the function "main" of this applet with
    the given input *applet_input*.

    See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for the available
    args.
    """
    # Rename applet_input arg to preserve API compatibility when calling
    # DXApplet.run(applet_input=...)
    return super(DXApplet, self).run(applet_input, *args, **kwargs)
Creates a new job that executes the function "main" of this applet with the given input *applet_input*. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for the available args.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L384-L394
dnanexus/dx-toolkit
src/python/dxpy/cli/parsers.py
set_env_from_args
def set_env_from_args(args): ''' Sets the environment variables for this process from arguments (argparse.Namespace) and calls dxpy._initialize() to reset any values that it has already set. ''' args = vars(args) if args.get('apiserver_host') is not None: config['DX_APISERVER_HOST'] = args['apiserver_host'] if args.get('apiserver_port') is not None: config['DX_APISERVER_PORT'] = args['apiserver_port'] if args.get('apiserver_protocol') is not None: config['DX_APISERVER_PROTOCOL'] = args['apiserver_protocol'] if args.get('project_context_id') is not None: config['DX_PROJECT_CONTEXT_ID'] = args['project_context_id'] if args.get('workspace_id') is not None: config['DX_WORKSPACE_ID'] = args['workspace_id'] if args.get('cli_wd') is not None: config['DX_CLI_WD'] = args['cli_wd'] if args.get('security_context') is not None: config['DX_SECURITY_CONTEXT'] = args['security_context'] if args.get('auth_token') is not None: config['DX_SECURITY_CONTEXT'] = json.dumps({"auth_token": args['auth_token'], "auth_token_type": "Bearer"})
python
def set_env_from_args(args): ''' Sets the environment variables for this process from arguments (argparse.Namespace) and calls dxpy._initialize() to reset any values that it has already set. ''' args = vars(args) if args.get('apiserver_host') is not None: config['DX_APISERVER_HOST'] = args['apiserver_host'] if args.get('apiserver_port') is not None: config['DX_APISERVER_PORT'] = args['apiserver_port'] if args.get('apiserver_protocol') is not None: config['DX_APISERVER_PROTOCOL'] = args['apiserver_protocol'] if args.get('project_context_id') is not None: config['DX_PROJECT_CONTEXT_ID'] = args['project_context_id'] if args.get('workspace_id') is not None: config['DX_WORKSPACE_ID'] = args['workspace_id'] if args.get('cli_wd') is not None: config['DX_CLI_WD'] = args['cli_wd'] if args.get('security_context') is not None: config['DX_SECURITY_CONTEXT'] = args['security_context'] if args.get('auth_token') is not None: config['DX_SECURITY_CONTEXT'] = json.dumps({"auth_token": args['auth_token'], "auth_token_type": "Bearer"})
Sets the environment variables for this process from arguments (argparse.Namespace) and calls dxpy._initialize() to reset any values that it has already set.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/cli/parsers.py#L186-L208
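A self-contained sketch of the pattern used in set_env_from_args above: turn an argparse.Namespace into a dict with vars() and copy only the options that were actually supplied into a config mapping. The flag names mirror the record, but the parser and the plain config dict here are stand-ins for dxpy's own argument parser and config object.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--apiserver-host')
parser.add_argument('--apiserver-port')
parser.add_argument('--project-context-id')

args = parser.parse_args(['--apiserver-host', 'api.example.com',
                          '--project-context-id', 'project-xxxx'])

config = {}
args_dict = vars(args)                      # Namespace -> dict, as in set_env_from_args
if args_dict.get('apiserver_host') is not None:
    config['DX_APISERVER_HOST'] = args_dict['apiserver_host']
if args_dict.get('apiserver_port') is not None:
    config['DX_APISERVER_PORT'] = args_dict['apiserver_port']
if args_dict.get('project_context_id') is not None:
    config['DX_PROJECT_CONTEXT_ID'] = args_dict['project_context_id']

print(config)
# {'DX_APISERVER_HOST': 'api.example.com', 'DX_PROJECT_CONTEXT_ID': 'project-xxxx'}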
dnanexus/dx-toolkit
src/python/dxpy/asset_builder.py
validate_conf
def validate_conf(asset_conf): """ Validates the contents of the conf file and makes sure that the required information is provided. { "name": "asset_library_name", "title": "A human readable name", "description": " A detailed description abput the asset", "version": "0.0.1", "distribution": "Ubuntu",# (Optional) "release": "12.04", "execDepends": [ {"name": "samtools", "package_manager": "apt"}, {"name": "bamtools"}, {"name": "bio", "package_manager": "gem", "version": "1.4.3"}, {"name": "pysam","package_manager": "pip", "version": "0.7.4"}, {"name": "Bio::SeqIO", "package_manager": "cpan", "version": "1.006924"} ] } """ if 'name' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "name".') # TODO: this default is not a good idea, and we will have to remove it once we ask customers to always provide release if 'release' not in asset_conf: asset_conf['release'] = "12.04" elif asset_conf['release'] not in ['16.04', '14.04', '12.04']: raise AssetBuilderException('The "release" field value should be either "12.04" (DEPRECATED), "14.04", "16.04".') if 'version' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "version". ') if 'title' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "title". ') if 'description' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "description".') if 'distribution' in asset_conf: if asset_conf['distribution'] != 'Ubuntu': raise AssetBuilderException('The distribution may only take the value "Ubuntu".') else: asset_conf['distribution'] = "Ubuntu"
python
def validate_conf(asset_conf): """ Validates the contents of the conf file and makes sure that the required information is provided. { "name": "asset_library_name", "title": "A human readable name", "description": " A detailed description abput the asset", "version": "0.0.1", "distribution": "Ubuntu",# (Optional) "release": "12.04", "execDepends": [ {"name": "samtools", "package_manager": "apt"}, {"name": "bamtools"}, {"name": "bio", "package_manager": "gem", "version": "1.4.3"}, {"name": "pysam","package_manager": "pip", "version": "0.7.4"}, {"name": "Bio::SeqIO", "package_manager": "cpan", "version": "1.006924"} ] } """ if 'name' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "name".') # TODO: this default is not a good idea, and we will have to remove it once we ask customers to always provide release if 'release' not in asset_conf: asset_conf['release'] = "12.04" elif asset_conf['release'] not in ['16.04', '14.04', '12.04']: raise AssetBuilderException('The "release" field value should be either "12.04" (DEPRECATED), "14.04", "16.04".') if 'version' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "version". ') if 'title' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "title". ') if 'description' not in asset_conf: raise AssetBuilderException('The asset configuration does not contain the required field "description".') if 'distribution' in asset_conf: if asset_conf['distribution'] != 'Ubuntu': raise AssetBuilderException('The distribution may only take the value "Ubuntu".') else: asset_conf['distribution'] = "Ubuntu"
Validates the contents of the conf file and makes sure that the required information is provided. { "name": "asset_library_name", "title": "A human readable name", "description": " A detailed description about the asset", "version": "0.0.1", "distribution": "Ubuntu",# (Optional) "release": "12.04", "execDepends": [ {"name": "samtools", "package_manager": "apt"}, {"name": "bamtools"}, {"name": "bio", "package_manager": "gem", "version": "1.4.3"}, {"name": "pysam","package_manager": "pip", "version": "0.7.4"}, {"name": "Bio::SeqIO", "package_manager": "cpan", "version": "1.006924"} ] }
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/asset_builder.py#L59-L97
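A minimal usage sketch for validate_conf, assuming dxpy is installed; the configuration dictionary below is a hypothetical example modeled on the docstring above, and no platform connection is needed.

from dxpy.asset_builder import validate_conf

# Hypothetical dxasset.json contents, modeled on the example in the docstring.
asset_conf = {
    "name": "my_asset_library",
    "title": "A human readable title",
    "description": "A detailed description about the asset",
    "version": "0.0.1",
    "release": "16.04",
}

# Raises AssetBuilderException if a required field is missing or has an
# unsupported value; otherwise it fills in defaults in place.
validate_conf(asset_conf)
print(asset_conf["distribution"])  # "Ubuntu" was added because it was omitted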
dnanexus/dx-toolkit
src/python/dxpy/asset_builder.py
get_asset_tarball
def get_asset_tarball(asset_name, src_dir, dest_project, dest_folder, json_out): """ If the src_dir contains a "resources" directory its contents are archived and the archived file is uploaded to the platform """ if os.path.isdir(os.path.join(src_dir, "resources")): temp_dir = tempfile.mkdtemp() try: resource_file = os.path.join(temp_dir, asset_name + "_resources.tar.gz") cmd = ["tar", "-czf", resource_file, "-C", os.path.join(src_dir, "resources"), "."] subprocess.check_call(cmd) file_id = dx_upload(resource_file, dest_project, dest_folder, json_out) return file_id finally: shutil.rmtree(temp_dir)
python
def get_asset_tarball(asset_name, src_dir, dest_project, dest_folder, json_out): """ If the src_dir contains a "resources" directory its contents are archived and the archived file is uploaded to the platform """ if os.path.isdir(os.path.join(src_dir, "resources")): temp_dir = tempfile.mkdtemp() try: resource_file = os.path.join(temp_dir, asset_name + "_resources.tar.gz") cmd = ["tar", "-czf", resource_file, "-C", os.path.join(src_dir, "resources"), "."] subprocess.check_call(cmd) file_id = dx_upload(resource_file, dest_project, dest_folder, json_out) return file_id finally: shutil.rmtree(temp_dir)
If the src_dir contains a "resources" directory its contents are archived and the archived file is uploaded to the platform
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/asset_builder.py#L145-L159
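A usage sketch for get_asset_tarball with placeholder arguments; it assumes dxpy is installed, an active DNAnexus login, an existing destination project, and tar on the PATH. The json_out value is simply passed through to the upload helper; False is assumed here.

from dxpy.asset_builder import get_asset_tarball

# "./my_asset" is a hypothetical source directory; only its "resources"
# subdirectory (if any) is tarred and uploaded.
file_id = get_asset_tarball(
    asset_name="my_asset",
    src_dir="./my_asset",
    dest_project="project-xxxx",   # placeholder project ID
    dest_folder="/assets",
    json_out=False,
)
# file_id is the ID of the uploaded my_asset_resources.tar.gz, or None when
# src_dir has no "resources" directory.
print(file_id)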
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
pick
def pick(choices, default=None, str_choices=None, prompt=None, allow_mult=False, more_choices=False): ''' :param choices: Strings between which the user will make a choice :type choices: list of strings :param default: Number the index to be used as the default :type default: int or None :param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique :type str_choices: list of strings :param prompt: A custom prompt to be used :type prompt: string :param allow_mult: Whether "*" is a valid option to select all choices :type allow_mult: boolean :param more_choices: Whether "m" is a valid option to ask for more options :type more_choices: boolean :returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True) :rtype: int or string :raises: :exc:`EOFError` to signify quitting the process At most one of allow_mult and more_choices should be set to True. ''' for i in range(len(choices)): prefix = str(i) + ') ' lines = choices[i].split("\n") joiner = "\n" + " " * len(prefix) print(prefix + joiner.join(lines)) if more_choices: print('m) More options not shown...') print('') if prompt is None: prompt = 'Pick a numbered choice' if allow_mult: prompt += ' or "*" for all' elif more_choices: prompt += ' or "m" for more options' if default is not None: prompt += ' [' + str(default) + ']' prompt += ': ' while True: try: value = input(prompt) except KeyboardInterrupt: print('') raise except EOFError: print('') raise if default is not None and value == '': return default if allow_mult and value == '*': return value if more_choices and value == 'm': return value try: choice = str_choices.index(value) return choice except: pass try: choice = int(value) if choice not in range(len(choices)): raise IndexError() return choice except Exception: print('Not a valid selection')
python
def pick(choices, default=None, str_choices=None, prompt=None, allow_mult=False, more_choices=False): ''' :param choices: Strings between which the user will make a choice :type choices: list of strings :param default: Number the index to be used as the default :type default: int or None :param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique :type str_choices: list of strings :param prompt: A custom prompt to be used :type prompt: string :param allow_mult: Whether "*" is a valid option to select all choices :type allow_mult: boolean :param more_choices: Whether "m" is a valid option to ask for more options :type more_choices: boolean :returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True) :rtype: int or string :raises: :exc:`EOFError` to signify quitting the process At most one of allow_mult and more_choices should be set to True. ''' for i in range(len(choices)): prefix = str(i) + ') ' lines = choices[i].split("\n") joiner = "\n" + " " * len(prefix) print(prefix + joiner.join(lines)) if more_choices: print('m) More options not shown...') print('') if prompt is None: prompt = 'Pick a numbered choice' if allow_mult: prompt += ' or "*" for all' elif more_choices: prompt += ' or "m" for more options' if default is not None: prompt += ' [' + str(default) + ']' prompt += ': ' while True: try: value = input(prompt) except KeyboardInterrupt: print('') raise except EOFError: print('') raise if default is not None and value == '': return default if allow_mult and value == '*': return value if more_choices and value == 'm': return value try: choice = str_choices.index(value) return choice except: pass try: choice = int(value) if choice not in range(len(choices)): raise IndexError() return choice except Exception: print('Not a valid selection')
:param choices: Strings between which the user will make a choice :type choices: list of strings :param default: Number the index to be used as the default :type default: int or None :param str_choices: Strings to be used as aliases for the choices; must be of the same length as choices and each string must be unique :type str_choices: list of strings :param prompt: A custom prompt to be used :type prompt: string :param allow_mult: Whether "*" is a valid option to select all choices :type allow_mult: boolean :param more_choices: Whether "m" is a valid option to ask for more options :type more_choices: boolean :returns: The user's choice, i.e. one of a numbered index of choices (e.g. 0 for the first item), "*" (only if allow_mult is True), or "m" (only if more_results is True) :rtype: int or string :raises: :exc:`EOFError` to signify quitting the process At most one of allow_mult and more_choices should be set to True.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L36-L99
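An interactive usage sketch for pick, assuming it is run in a terminal; the choice strings are placeholders.

from dxpy.utils.resolver import pick

choices = ["project-1111 (ADMINISTER)", "project-2222 (VIEW)"]

# Prints a numbered menu, prompts "Select a project [0]: ", and returns the
# chosen index (pressing Enter accepts the default). Quitting raises EOFError.
idx = pick(choices, default=0, prompt="Select a project")
print("You picked:", choices[idx])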
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
object_exists_in_project
def object_exists_in_project(obj_id, proj_id): ''' :param obj_id: object ID :type obj_id: str :param proj_id: project ID :type proj_id: str Returns True if the specified data object can be found in the specified project. ''' if obj_id is None: raise ValueError("Expected obj_id to be a string") if proj_id is None: raise ValueError("Expected proj_id to be a string") if not is_container_id(proj_id): raise ValueError('Expected %r to be a container ID' % (proj_id,)) return try_call(dxpy.DXHTTPRequest, '/' + obj_id + '/describe', {'project': proj_id})['project'] == proj_id
python
def object_exists_in_project(obj_id, proj_id): ''' :param obj_id: object ID :type obj_id: str :param proj_id: project ID :type proj_id: str Returns True if the specified data object can be found in the specified project. ''' if obj_id is None: raise ValueError("Expected obj_id to be a string") if proj_id is None: raise ValueError("Expected proj_id to be a string") if not is_container_id(proj_id): raise ValueError('Expected %r to be a container ID' % (proj_id,)) return try_call(dxpy.DXHTTPRequest, '/' + obj_id + '/describe', {'project': proj_id})['project'] == proj_id
:param obj_id: object ID :type obj_id: str :param proj_id: project ID :type proj_id: str Returns True if the specified data object can be found in the specified project.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L195-L211
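A usage sketch for object_exists_in_project, assuming an active DNAnexus login; both arguments are placeholders for a real data object ID and a syntactically valid project ID.

from dxpy.utils.resolver import object_exists_in_project

# Returns True only when the describe call reports the object as living in
# the given project; raises ValueError for missing or malformed arguments.
if object_exists_in_project("file-xxxx", "project-xxxx"):
    print("The object is in the project")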
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
get_last_pos_of_char
def get_last_pos_of_char(char, string): ''' :param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* last appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the last occurrence of *char* in *string* in which *char* is not present as an escaped character. ''' pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return -1 num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == '\\': num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: return pos return -1
python
def get_last_pos_of_char(char, string): ''' :param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* last appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the last occurrence of *char* in *string* in which *char* is not present as an escaped character. ''' pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return -1 num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == '\\': num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: return pos return -1
:param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* last appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the last occurrence of *char* in *string* in which *char* is not present as an escaped character.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L233-L258
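Because this helper is pure string manipulation, its behaviour can be illustrated with small self-contained calls; the expected results are shown in the comments.

from dxpy.utils.resolver import get_last_pos_of_char

print(get_last_pos_of_char('/', 'a/b/c'))   # 3  -> index of the last unescaped '/'
print(get_last_pos_of_char('/', 'a\\/b'))   # -1 -> the only '/' is escaped by a backslash
print(get_last_pos_of_char(':', 'abc'))     # -1 -> character not present at all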
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
get_first_pos_of_char
def get_first_pos_of_char(char, string): ''' :param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character. ''' first_pos = -1 pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return first_pos num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == '\\': num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: first_pos = pos return first_pos
python
def get_first_pos_of_char(char, string): ''' :param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character. ''' first_pos = -1 pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return first_pos num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == '\\': num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: first_pos = pos return first_pos
:param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L260-L286
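As with its counterpart above, the behaviour is easiest to see from small self-contained calls; the expected results are shown in the comments.

from dxpy.utils.resolver import get_first_pos_of_char

print(get_first_pos_of_char(':', 'proj:folder:file'))    # 4  -> the first ':'
print(get_first_pos_of_char(':', 'proj\\:name:folder'))  # 10 -> the escaped ':' at index 5 is skipped
print(get_first_pos_of_char(':', 'no-colon-here'))       # -1 -> character not present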
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
split_unescaped
def split_unescaped(char, string, include_empty_strings=False): ''' :param char: The character on which to split the string :type char: string :param string: The string to split :type string: string :returns: List of substrings of *string* :rtype: list of strings Splits *string* whenever *char* appears without an odd number of backslashes ('\\') preceding it, discarding any empty string elements. ''' words = [] pos = len(string) lastpos = pos while pos >= 0: pos = get_last_pos_of_char(char, string[:lastpos]) if pos >= 0: if pos + 1 != lastpos or include_empty_strings: words.append(string[pos + 1: lastpos]) lastpos = pos if lastpos != 0 or include_empty_strings: words.append(string[:lastpos]) words.reverse() return words
python
def split_unescaped(char, string, include_empty_strings=False): ''' :param char: The character on which to split the string :type char: string :param string: The string to split :type string: string :returns: List of substrings of *string* :rtype: list of strings Splits *string* whenever *char* appears without an odd number of backslashes ('\\') preceding it, discarding any empty string elements. ''' words = [] pos = len(string) lastpos = pos while pos >= 0: pos = get_last_pos_of_char(char, string[:lastpos]) if pos >= 0: if pos + 1 != lastpos or include_empty_strings: words.append(string[pos + 1: lastpos]) lastpos = pos if lastpos != 0 or include_empty_strings: words.append(string[:lastpos]) words.reverse() return words
:param char: The character on which to split the string :type char: string :param string: The string to split :type string: string :returns: List of substrings of *string* :rtype: list of strings Splits *string* whenever *char* appears without an odd number of backslashes ('\\') preceding it, discarding any empty string elements.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L288-L314
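A self-contained sketch of split_unescaped; the expected results are shown in the comments.

from dxpy.utils.resolver import split_unescaped

# Empty components from consecutive separators are dropped by default.
print(split_unescaped('/', 'a/b//c'))                              # ['a', 'b', 'c']
print(split_unescaped('/', 'a/b//c', include_empty_strings=True))  # ['a', 'b', '', 'c']
# An escaped separator does not split, and the escape is left in place.
print(split_unescaped(':', 'proj\\:ect:folder'))                   # ['proj\\:ect', 'folder']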
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
clean_folder_path
def clean_folder_path(path, expected=None): ''' :param path: A folder path to sanitize and parse :type path: string :param expected: Whether a folder ("folder"), a data object ("entity"), or either (None) is expected :type expected: string or None :returns: *folderpath*, *name* Unescape and parse *path* as a folder path to possibly an entity name. Consecutive unescaped forward slashes "/" are collapsed to a single forward slash. If *expected* is "folder", *name* is always returned as None. Otherwise, the string to the right of the last unescaped "/" is considered a possible data object name and returned as such. ''' folders = split_unescaped('/', path) if len(folders) == 0: return '/', None if expected == 'folder' or folders[-1] == '.' or folders[-1] == '..' or get_last_pos_of_char('/', path) == len(path) - 1: entity_name = None else: entity_name = unescape_name_str(folders.pop()) sanitized_folders = [] for folder in folders: if folder == '.': pass elif folder == '..': if len(sanitized_folders) > 0: sanitized_folders.pop() else: sanitized_folders.append(unescape_folder_str(folder)) return ('/' + '/'.join(sanitized_folders)), entity_name
python
def clean_folder_path(path, expected=None): ''' :param path: A folder path to sanitize and parse :type path: string :param expected: Whether a folder ("folder"), a data object ("entity"), or either (None) is expected :type expected: string or None :returns: *folderpath*, *name* Unescape and parse *path* as a folder path to possibly an entity name. Consecutive unescaped forward slashes "/" are collapsed to a single forward slash. If *expected* is "folder", *name* is always returned as None. Otherwise, the string to the right of the last unescaped "/" is considered a possible data object name and returned as such. ''' folders = split_unescaped('/', path) if len(folders) == 0: return '/', None if expected == 'folder' or folders[-1] == '.' or folders[-1] == '..' or get_last_pos_of_char('/', path) == len(path) - 1: entity_name = None else: entity_name = unescape_name_str(folders.pop()) sanitized_folders = [] for folder in folders: if folder == '.': pass elif folder == '..': if len(sanitized_folders) > 0: sanitized_folders.pop() else: sanitized_folders.append(unescape_folder_str(folder)) return ('/' + '/'.join(sanitized_folders)), entity_name
:param path: A folder path to sanitize and parse :type path: string :param expected: Whether a folder ("folder"), a data object ("entity"), or either (None) is expected :type expected: string or None :returns: *folderpath*, *name* Unescape and parse *path* as a folder path to possibly an entity name. Consecutive unescaped forward slashes "/" are collapsed to a single forward slash. If *expected* is "folder", *name* is always returned as None. Otherwise, the string to the right of the last unescaped "/" is considered a possible data object name and returned as such.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L317-L354
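A self-contained sketch of clean_folder_path; no platform access is needed, and the expected results are shown in the comments.

from dxpy.utils.resolver import clean_folder_path

# '.' and '..' components are collapsed; the trailing component is returned
# as a possible object name unless the path clearly denotes a folder.
print(clean_folder_path('/data/./raw/../reads/sample.fastq'))
# -> ('/data/reads', 'sample.fastq')
print(clean_folder_path('/data/reads/'))
# -> ('/data/reads', None)   a trailing '/' marks the path as a folder
print(clean_folder_path('/data/reads', expected='folder'))
# -> ('/data/reads', None)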
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_container_id_or_name
def resolve_container_id_or_name(raw_string, is_error=False, multi=False): ''' :param raw_string: A potential project or container ID or name :type raw_string: string :param is_error: Whether to raise an exception if the project or container ID cannot be resolved :type is_error: boolean :returns: Project or container ID if found or else None :rtype: string or None :raises: :exc:`ResolutionError` if *is_error* is True and the project or container could not be resolved Unescapes and attempts to resolve *raw_string* to a project or container ID. ''' string = unescape_name_str(raw_string) if is_container_id(string): return ([string] if multi else string) if string in cached_project_names: return ([cached_project_names[string]] if multi else cached_project_names[string]) try: results = list(dxpy.find_projects(name=string, describe=True, level='VIEW')) except Exception as details: raise ResolutionError(str(details)) if len(results) == 1: cached_project_names[string] = results[0]['id'] return ([results[0]['id']] if multi else results[0]['id']) elif len(results) == 0: if is_error: raise ResolutionError('Could not find a project named "' + string + '"') return ([] if multi else None) elif not multi: if INTERACTIVE_CLI: print('Found multiple projects with name "' + string + '"') choice = pick(['{id} ({level})'.format(id=result['id'], level=result['level']) for result in results]) return results[choice]['id'] else: raise ResolutionError('Found multiple projects with name "' + string + '"; please use a project ID to specify the desired project') else: # len(results) > 1 and multi return [result['id'] for result in results]
python
def resolve_container_id_or_name(raw_string, is_error=False, multi=False): ''' :param raw_string: A potential project or container ID or name :type raw_string: string :param is_error: Whether to raise an exception if the project or container ID cannot be resolved :type is_error: boolean :returns: Project or container ID if found or else None :rtype: string or None :raises: :exc:`ResolutionError` if *is_error* is True and the project or container could not be resolved Unescapes and attempts to resolve *raw_string* to a project or container ID. ''' string = unescape_name_str(raw_string) if is_container_id(string): return ([string] if multi else string) if string in cached_project_names: return ([cached_project_names[string]] if multi else cached_project_names[string]) try: results = list(dxpy.find_projects(name=string, describe=True, level='VIEW')) except Exception as details: raise ResolutionError(str(details)) if len(results) == 1: cached_project_names[string] = results[0]['id'] return ([results[0]['id']] if multi else results[0]['id']) elif len(results) == 0: if is_error: raise ResolutionError('Could not find a project named "' + string + '"') return ([] if multi else None) elif not multi: if INTERACTIVE_CLI: print('Found multiple projects with name "' + string + '"') choice = pick(['{id} ({level})'.format(id=result['id'], level=result['level']) for result in results]) return results[choice]['id'] else: raise ResolutionError('Found multiple projects with name "' + string + '"; please use a project ID to specify the desired project') else: # len(results) > 1 and multi return [result['id'] for result in results]
:param raw_string: A potential project or container ID or name :type raw_string: string :param is_error: Whether to raise an exception if the project or container ID cannot be resolved :type is_error: boolean :returns: Project or container ID if found or else None :rtype: string or None :raises: :exc:`ResolutionError` if *is_error* is True and the project or container could not be resolved Unescapes and attempts to resolve *raw_string* to a project or container ID.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L357-L402
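A usage sketch for resolve_container_id_or_name; "My Research Project" is a placeholder name and an active DNAnexus login is assumed for the name lookup.

from dxpy.utils.resolver import resolve_container_id_or_name

# With is_error=True a ResolutionError is raised if no project matches.
proj_id = resolve_container_id_or_name("My Research Project", is_error=True)
print(proj_id)

# multi=True returns a (possibly empty) list of matching project IDs instead.
print(resolve_container_id_or_name("My Research Project", multi=True))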
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_path
def resolve_path(path, expected=None, multi_projects=False, allow_empty_string=True): ''' :param path: A path to a data object to attempt to resolve :type path: string :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :returns: A tuple of 3 values: container_ID, folderpath, entity_name :rtype: string, string, string :raises: exc:`ResolutionError` if the project cannot be resolved by name or the path is malformed :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean Attempts to resolve *path* to a project or container ID, a folder path, and a data object or folder name. This method will NOT raise an exception if the specified folder or object does not exist. This method is primarily for parsing purposes. Returns one of the following: (project, folder, maybe_name) where project is a container ID (non-null) folder is a folder path maybe_name is a string if the path could represent a folder or an object, or maybe_name is None if the path could only represent a folder OR (maybe_project, None, object_id) where maybe_project is a container ID or None object_id is a dataobject, app, or execution (specified by ID, not name) OR (job_id, None, output_name) where job_id and output_name are both non-null ''' # TODO: callers that intend to obtain a data object probably won't be happy # with an app or execution ID. Callers should probably have to specify # whether they are okay with getting an execution ID or not. # TODO: callers that are looking for a place to write data, rather than # read it, probably won't be happy with receiving an object ID, or a # JBOR. Callers should probably specify whether they are looking for an # "LHS" expression or not. 
if '_DX_FUSE' in os.environ: from xattr import xattr path = xattr(path)['project'] + ":" + xattr(path)['id'] if path == '' and not allow_empty_string: raise ResolutionError('Cannot parse ""; expected the path to be a non-empty string') path = _maybe_convert_stringified_dxlink(path) # Easy case: ":" if path == ':': if dxpy.WORKSPACE_ID is None: raise ResolutionError("Cannot resolve \":\": expected a project name or ID " "to the left of the colon, or for a current project to be set") return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), '/', None # Second easy case: empty string if path == '': if dxpy.WORKSPACE_ID is None: raise ResolutionError('Expected a project name or ID to the left of a colon, ' 'or for a current project to be set') return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), dxpy.config.get('DX_CLI_WD', '/'), None # Third easy case: hash ID if is_container_id(path): return ([path] if multi_projects else path), '/', None elif is_hashid(path): return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), None, path # using a numerical sentinel value to indicate that it hasn't been # set in case dxpy.WORKSPACE_ID is actually None project = 0 folderpath = None entity_name = None wd = dxpy.config.get('DX_CLI_WD', u'/') # Test for multiple colons last_colon = get_last_pos_of_char(':', path) if last_colon >= 0: last_last_colon = get_last_pos_of_char(':', path[:last_colon]) if last_last_colon >= 0: raise ResolutionError('Cannot parse "' + path + '" as a path; at most one unescaped colon can be present') substrings = split_unescaped(':', path) if len(substrings) == 2: # One of the following: # 1) job-id:fieldname # 2) project-name-or-id:folderpath/to/possible/entity if is_job_id(substrings[0]): return ([substrings[0]] if multi_projects else substrings[0]), None, substrings[1] if multi_projects: project_ids = resolve_container_id_or_name(substrings[0], is_error=True, multi=True) else: project = resolve_container_id_or_name(substrings[0], is_error=True) wd = '/' elif get_last_pos_of_char(':', path) >= 0: # :folderpath/to/possible/entity OR project-name-or-id: # Colon is either at the beginning or at the end wd = '/' if path.startswith(':'): if dxpy.WORKSPACE_ID is None: raise ResolutionError('Cannot resolve "%s": expected a project name or ID to the left of the ' 'colon, or for a current project to be set' % (path,)) project = dxpy.WORKSPACE_ID else: # One nonempty string to the left of a colon project = resolve_container_id_or_name(substrings[0], is_error=True) folderpath = '/' else: # One nonempty string, no colon present, do NOT interpret as # project project = dxpy.WORKSPACE_ID if project is None: raise ResolutionError('Cannot resolve "%s": expected the path to be qualified with a project name or ID, ' 'and a colon; or for a current project to be set' % (path,)) # Determine folderpath and entity_name if necessary if folderpath is None: folderpath = substrings[-1] folderpath, entity_name = clean_folder_path(('' if folderpath.startswith('/') else wd + '/') + folderpath, expected) if multi_projects: return (project_ids if project == 0 else [project]), folderpath, entity_name else: return project, folderpath, entity_name
python
def resolve_path(path, expected=None, multi_projects=False, allow_empty_string=True): ''' :param path: A path to a data object to attempt to resolve :type path: string :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :returns: A tuple of 3 values: container_ID, folderpath, entity_name :rtype: string, string, string :raises: exc:`ResolutionError` if the project cannot be resolved by name or the path is malformed :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean Attempts to resolve *path* to a project or container ID, a folder path, and a data object or folder name. This method will NOT raise an exception if the specified folder or object does not exist. This method is primarily for parsing purposes. Returns one of the following: (project, folder, maybe_name) where project is a container ID (non-null) folder is a folder path maybe_name is a string if the path could represent a folder or an object, or maybe_name is None if the path could only represent a folder OR (maybe_project, None, object_id) where maybe_project is a container ID or None object_id is a dataobject, app, or execution (specified by ID, not name) OR (job_id, None, output_name) where job_id and output_name are both non-null ''' # TODO: callers that intend to obtain a data object probably won't be happy # with an app or execution ID. Callers should probably have to specify # whether they are okay with getting an execution ID or not. # TODO: callers that are looking for a place to write data, rather than # read it, probably won't be happy with receiving an object ID, or a # JBOR. Callers should probably specify whether they are looking for an # "LHS" expression or not. 
if '_DX_FUSE' in os.environ: from xattr import xattr path = xattr(path)['project'] + ":" + xattr(path)['id'] if path == '' and not allow_empty_string: raise ResolutionError('Cannot parse ""; expected the path to be a non-empty string') path = _maybe_convert_stringified_dxlink(path) # Easy case: ":" if path == ':': if dxpy.WORKSPACE_ID is None: raise ResolutionError("Cannot resolve \":\": expected a project name or ID " "to the left of the colon, or for a current project to be set") return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), '/', None # Second easy case: empty string if path == '': if dxpy.WORKSPACE_ID is None: raise ResolutionError('Expected a project name or ID to the left of a colon, ' 'or for a current project to be set') return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), dxpy.config.get('DX_CLI_WD', '/'), None # Third easy case: hash ID if is_container_id(path): return ([path] if multi_projects else path), '/', None elif is_hashid(path): return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), None, path # using a numerical sentinel value to indicate that it hasn't been # set in case dxpy.WORKSPACE_ID is actually None project = 0 folderpath = None entity_name = None wd = dxpy.config.get('DX_CLI_WD', u'/') # Test for multiple colons last_colon = get_last_pos_of_char(':', path) if last_colon >= 0: last_last_colon = get_last_pos_of_char(':', path[:last_colon]) if last_last_colon >= 0: raise ResolutionError('Cannot parse "' + path + '" as a path; at most one unescaped colon can be present') substrings = split_unescaped(':', path) if len(substrings) == 2: # One of the following: # 1) job-id:fieldname # 2) project-name-or-id:folderpath/to/possible/entity if is_job_id(substrings[0]): return ([substrings[0]] if multi_projects else substrings[0]), None, substrings[1] if multi_projects: project_ids = resolve_container_id_or_name(substrings[0], is_error=True, multi=True) else: project = resolve_container_id_or_name(substrings[0], is_error=True) wd = '/' elif get_last_pos_of_char(':', path) >= 0: # :folderpath/to/possible/entity OR project-name-or-id: # Colon is either at the beginning or at the end wd = '/' if path.startswith(':'): if dxpy.WORKSPACE_ID is None: raise ResolutionError('Cannot resolve "%s": expected a project name or ID to the left of the ' 'colon, or for a current project to be set' % (path,)) project = dxpy.WORKSPACE_ID else: # One nonempty string to the left of a colon project = resolve_container_id_or_name(substrings[0], is_error=True) folderpath = '/' else: # One nonempty string, no colon present, do NOT interpret as # project project = dxpy.WORKSPACE_ID if project is None: raise ResolutionError('Cannot resolve "%s": expected the path to be qualified with a project name or ID, ' 'and a colon; or for a current project to be set' % (path,)) # Determine folderpath and entity_name if necessary if folderpath is None: folderpath = substrings[-1] folderpath, entity_name = clean_folder_path(('' if folderpath.startswith('/') else wd + '/') + folderpath, expected) if multi_projects: return (project_ids if project == 0 else [project]), folderpath, entity_name else: return project, folderpath, entity_name
:param path: A path to a data object to attempt to resolve :type path: string :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :returns: A tuple of 3 values: container_ID, folderpath, entity_name :rtype: string, string, string :raises: exc:`ResolutionError` if the project cannot be resolved by name or the path is malformed :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean Attempts to resolve *path* to a project or container ID, a folder path, and a data object or folder name. This method will NOT raise an exception if the specified folder or object does not exist. This method is primarily for parsing purposes. Returns one of the following: (project, folder, maybe_name) where project is a container ID (non-null) folder is a folder path maybe_name is a string if the path could represent a folder or an object, or maybe_name is None if the path could only represent a folder OR (maybe_project, None, object_id) where maybe_project is a container ID or None object_id is a dataobject, app, or execution (specified by ID, not name) OR (job_id, None, output_name) where job_id and output_name are both non-null
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L420-L557
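A parsing-only sketch for resolve_path; the project qualifier is a placeholder (a real project ID would be returned unchanged, while a name would be looked up through the API), and no check is made that the folder or object actually exists.

from dxpy.utils.resolver import resolve_path

project, folder, name = resolve_path("project-xxxx:/genomes/hg38.fa")
print(project)  # the container ID for the left-hand qualifier
print(folder)   # '/genomes'
print(name)     # 'hg38.fa'

# A bare DX hash ID parses to (current workspace, None, the ID itself):
# resolve_path("file-xxxx")   # "file-xxxx" stands in for a real file ID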
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
_check_resolution_needed
def _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=None, describe=True, enclose_in_list=False): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param describe: Dictionary of inputs to the describe API call; if no describe input is provided (default value True), then an empty mapping is passed to the describe API method :type describe: dict or True :param enclose_in_list: Whether the describe output is to be in the form of a list (if False, the last return value is a dictionary; if True, the last return value is a list of one dictionary); it will only have an effect if entity_name is a DX ID and is described :type enclose_in_list: boolean :returns: Whether or not the entity needs to be resolved with a more general resolution method, the project, the folderpath, and the entity name :rtype: tuple of 4 elements :raises: ResolutionError if the entity fails to be described Attempts to resolve the entity to a folder or an object, and describes the entity iff it is a DX ID of an expected class in the list expected_classes. Otherwise, determines whether or not more general resolution may be able to resolve the entity. If a more general resolution method is needed, then the return values will look like: (True, <project>, <folderpath>, <entity_name>) If the entity is a DX ID, but is not one of the supplied expected classes, then the return values will look like: (False, None, None, None) If the entity can be successfully described, then the return values will look like: <desc_output> ::= {"id": entity_name, "describe": {...}} <desc_or_desc_list> ::= <desc_output> || [<desc_output>] (False, <project>, <folderpath>, <desc_or_desc_list>) If the entity may be a folder, then the return values will look like: (False, <project>, <folderpath>, None) TODO: Allow arbitrary flags for the describe mapping. 
""" if entity_name is None: # Definitely a folder (or project) # TODO: find a good way to check if folder exists and expected=folder return False, project, folderpath, None elif is_hashid(entity_name): found_valid_class = True if expected_classes is not None: found_valid_class = False for klass in expected_classes: if entity_name.startswith(klass): found_valid_class = True if not found_valid_class: return False, None, None, None if describe is True: describe = {} # entity is an ID of a valid class, try to describe it if 'project' not in describe: if project != dxpy.WORKSPACE_ID: describe['project'] = project elif dxpy.WORKSPACE_ID is not None: describe['project'] = dxpy.WORKSPACE_ID try: desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe) desc = dxpy.append_underlying_workflow_describe(desc) except Exception as details: if 'project' in describe: # Now try it without the hint del describe['project'] try: desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe) except Exception as details2: raise ResolutionError(str(details2)) else: raise ResolutionError(str(details)) result = {"id": entity_name, "describe": desc} if enclose_in_list: return False, project, folderpath, [result] else: return False, project, folderpath, result else: # Need to resolve later return True, project, folderpath, entity_name
python
def _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=None, describe=True, enclose_in_list=False): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param describe: Dictionary of inputs to the describe API call; if no describe input is provided (default value True), then an empty mapping is passed to the describe API method :type describe: dict or True :param enclose_in_list: Whether the describe output is to be in the form of a list (if False, the last return value is a dictionary; if True, the last return value is a list of one dictionary); it will only have an effect if entity_name is a DX ID and is described :type enclose_in_list: boolean :returns: Whether or not the entity needs to be resolved with a more general resolution method, the project, the folderpath, and the entity name :rtype: tuple of 4 elements :raises: ResolutionError if the entity fails to be described Attempts to resolve the entity to a folder or an object, and describes the entity iff it is a DX ID of an expected class in the list expected_classes. Otherwise, determines whether or not more general resolution may be able to resolve the entity. If a more general resolution method is needed, then the return values will look like: (True, <project>, <folderpath>, <entity_name>) If the entity is a DX ID, but is not one of the supplied expected classes, then the return values will look like: (False, None, None, None) If the entity can be successfully described, then the return values will look like: <desc_output> ::= {"id": entity_name, "describe": {...}} <desc_or_desc_list> ::= <desc_output> || [<desc_output>] (False, <project>, <folderpath>, <desc_or_desc_list>) If the entity may be a folder, then the return values will look like: (False, <project>, <folderpath>, None) TODO: Allow arbitrary flags for the describe mapping. 
""" if entity_name is None: # Definitely a folder (or project) # TODO: find a good way to check if folder exists and expected=folder return False, project, folderpath, None elif is_hashid(entity_name): found_valid_class = True if expected_classes is not None: found_valid_class = False for klass in expected_classes: if entity_name.startswith(klass): found_valid_class = True if not found_valid_class: return False, None, None, None if describe is True: describe = {} # entity is an ID of a valid class, try to describe it if 'project' not in describe: if project != dxpy.WORKSPACE_ID: describe['project'] = project elif dxpy.WORKSPACE_ID is not None: describe['project'] = dxpy.WORKSPACE_ID try: desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe) desc = dxpy.append_underlying_workflow_describe(desc) except Exception as details: if 'project' in describe: # Now try it without the hint del describe['project'] try: desc = dxpy.DXHTTPRequest('/' + entity_name + '/describe', describe) except Exception as details2: raise ResolutionError(str(details2)) else: raise ResolutionError(str(details)) result = {"id": entity_name, "describe": desc} if enclose_in_list: return False, project, folderpath, [result] else: return False, project, folderpath, result else: # Need to resolve later return True, project, folderpath, entity_name
:param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param describe: Dictionary of inputs to the describe API call; if no describe input is provided (default value True), then an empty mapping is passed to the describe API method :type describe: dict or True :param enclose_in_list: Whether the describe output is to be in the form of a list (if False, the last return value is a dictionary; if True, the last return value is a list of one dictionary); it will only have an effect if entity_name is a DX ID and is described :type enclose_in_list: boolean :returns: Whether or not the entity needs to be resolved with a more general resolution method, the project, the folderpath, and the entity name :rtype: tuple of 4 elements :raises: ResolutionError if the entity fails to be described Attempts to resolve the entity to a folder or an object, and describes the entity iff it is a DX ID of an expected class in the list expected_classes. Otherwise, determines whether or not more general resolution may be able to resolve the entity. If a more general resolution method is needed, then the return values will look like: (True, <project>, <folderpath>, <entity_name>) If the entity is a DX ID, but is not one of the supplied expected classes, then the return values will look like: (False, None, None, None) If the entity can be successfully described, then the return values will look like: <desc_output> ::= {"id": entity_name, "describe": {...}} <desc_or_desc_list> ::= <desc_output> || [<desc_output>] (False, <project>, <folderpath>, <desc_or_desc_list>) If the entity may be a folder, then the return values will look like: (False, <project>, <folderpath>, None) TODO: Allow arbitrary flags for the describe mapping.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L615-L718
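A sketch of how this internal helper is typically combined with resolve_path; the path is a placeholder, and an active login is assumed when the entity turns out to be a DX hash ID that has to be described.

from dxpy.utils.resolver import resolve_path, _check_resolution_needed

path = "project-xxxx:/reads/sample.fastq"   # placeholder path
project, folder, name = resolve_path(path, expected='entity')

must_resolve, project, folder, entity = _check_resolution_needed(
    path, project, folder, name)

if must_resolve:
    # entity is still just a name and needs a find_data_objects-style lookup.
    print("needs further resolution:", entity)
else:
    # entity is None (a folder) or a describe result for a DX hash ID.
    print("resolved directly:", entity)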
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
_resolve_folder
def _resolve_folder(project, parent_folder, folder_name): """ :param project: The project that the folder belongs to :type project: string :param parent_folder: Full path to the parent folder that contains folder_name :type parent_folder: string :param folder_name: Name of the folder :type folder_name: string :returns: The path to folder_name, if it exists, in the form of "<parent_folder>/<folder_name>" :rtype: string :raises: ResolutionError if folder_name is not a folder, or if folder_name points to a folder that does not exist Attempts to resolve folder_name at location parent_folder in project. """ if '/' in folder_name: # Then there's no way it's supposed to be a folder raise ResolutionError('Object of name ' + str(folder_name) + ' could not be resolved in folder ' + str(parent_folder) + ' of project ID ' + str(project)) possible_folder, _skip = clean_folder_path(parent_folder + '/' + folder_name, 'folder') if not check_folder_exists(project, parent_folder, folder_name): raise ResolutionError('Unable to resolve "' + folder_name + '" to a data object or folder name in \'' + parent_folder + "'") return possible_folder
python
def _resolve_folder(project, parent_folder, folder_name): """ :param project: The project that the folder belongs to :type project: string :param parent_folder: Full path to the parent folder that contains folder_name :type parent_folder: string :param folder_name: Name of the folder :type folder_name: string :returns: The path to folder_name, if it exists, in the form of "<parent_folder>/<folder_name>" :rtype: string :raises: ResolutionError if folder_name is not a folder, or if folder_name points to a folder that does not exist Attempts to resolve folder_name at location parent_folder in project. """ if '/' in folder_name: # Then there's no way it's supposed to be a folder raise ResolutionError('Object of name ' + str(folder_name) + ' could not be resolved in folder ' + str(parent_folder) + ' of project ID ' + str(project)) possible_folder, _skip = clean_folder_path(parent_folder + '/' + folder_name, 'folder') if not check_folder_exists(project, parent_folder, folder_name): raise ResolutionError('Unable to resolve "' + folder_name + '" to a data object or folder name in \'' + parent_folder + "'") return possible_folder
:param project: The project that the folder belongs to :type project: string :param parent_folder: Full path to the parent folder that contains folder_name :type parent_folder: string :param folder_name: Name of the folder :type folder_name: string :returns: The path to folder_name, if it exists, in the form of "<parent_folder>/<folder_name>" :rtype: string :raises: ResolutionError if folder_name is not a folder, or if folder_name points to a folder that does not exist Attempts to resolve folder_name at location parent_folder in project.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L721-L747
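A sketch for this internal helper with placeholder IDs; it assumes an active login, since check_folder_exists issues an API call, and that ResolutionError is importable from the same module, as the code above suggests.

from dxpy.utils.resolver import _resolve_folder, ResolutionError

try:
    # Succeeds only if "/data/reads" exists in the (placeholder) project.
    print(_resolve_folder("project-xxxx", "/data", "reads"))   # '/data/reads'
except ResolutionError as err:
    print("no such folder:", err)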
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
_validate_resolution_output_length
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False, ask_to_resolve=True): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param entity_name: Name of the object :type entity_name: string :param results: Result of resolution; non-empty list of object specifications (each specification is a dictionary with keys "project" and "id") :type results: list of dictionaries :param allow_mult: If True, it is okay to choose from multiple results of a single resolved object, or return all results found; if False, raise an error if multiple results are found :type allow_mult: boolean :param all_mult: If True, return all results if multiple results are found for a single resolved object; if False, user needs to choose a single result if multiple are found; the value of all_mult only has an effect if allow_mult is True) :type all_mult: boolean :param ask_to_resolve: Whether picking may be necessary (if True, a list is returned; if False, only one result is returned); if specified as True, then all results will be returned, regardless of the values of allow_mult and all_mult :type ask_to_resolve: boolean :returns: The results of resolving entity_name, expected to be of the following form: <resolved_object> # If only one result is present or the user # is able to select from multiple OR [<resolved_object>, ...] # If multiple results are present and # it is allowed where <resolved_object> is of the following form: {"project": <project_id>, "id": <object_id>} :rtype: dict or list of dicts :raises: ValueError if results is empty :raises: ResolutionError if too many results are found and the user is not in interactive mode and cannot select one Precondition: results must be a nonempty list Validates length of results. If there are multiple results found and the user is in interactive mode, then the user will be prompted to select a single result to be returned. """ if len(results) == 0: raise ValueError("'results' must be nonempty.") # Caller wants ALL results, so return the entire results list # At this point, do not care about the values of allow_mult or all_mult if not ask_to_resolve: return results if len(results) > 1: # The other way the caller can specify it wants all results is by setting # allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern) if allow_mult and (all_mult or is_glob_pattern(entity_name)): return results if INTERACTIVE_CLI: print('The given path "' + path + '" resolves to the following data objects:') if any(['describe' not in result for result in results]): # findDataObject API call must be made to get 'describe' mappings project, folderpath, entity_name = resolve_path(path, expected='entity') results = _resolve_global_entity(project, folderpath, entity_name) choice = pick([get_ls_l_desc(result['describe']) for result in results], allow_mult=allow_mult) if allow_mult and choice == '*': return results else: return [results[choice]] if allow_mult else results[choice] else: raise ResolutionError('The given path "' + path + '" resolves to ' + str(len(results)) + ' data objects') else: return [results[0]] if allow_mult else results[0]
python
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False, ask_to_resolve=True): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param entity_name: Name of the object :type entity_name: string :param results: Result of resolution; non-empty list of object specifications (each specification is a dictionary with keys "project" and "id") :type results: list of dictionaries :param allow_mult: If True, it is okay to choose from multiple results of a single resolved object, or return all results found; if False, raise an error if multiple results are found :type allow_mult: boolean :param all_mult: If True, return all results if multiple results are found for a single resolved object; if False, user needs to choose a single result if multiple are found; the value of all_mult only has an effect if allow_mult is True) :type all_mult: boolean :param ask_to_resolve: Whether picking may be necessary (if True, a list is returned; if False, only one result is returned); if specified as True, then all results will be returned, regardless of the values of allow_mult and all_mult :type ask_to_resolve: boolean :returns: The results of resolving entity_name, expected to be of the following form: <resolved_object> # If only one result is present or the user # is able to select from multiple OR [<resolved_object>, ...] # If multiple results are present and # it is allowed where <resolved_object> is of the following form: {"project": <project_id>, "id": <object_id>} :rtype: dict or list of dicts :raises: ValueError if results is empty :raises: ResolutionError if too many results are found and the user is not in interactive mode and cannot select one Precondition: results must be a nonempty list Validates length of results. If there are multiple results found and the user is in interactive mode, then the user will be prompted to select a single result to be returned. """ if len(results) == 0: raise ValueError("'results' must be nonempty.") # Caller wants ALL results, so return the entire results list # At this point, do not care about the values of allow_mult or all_mult if not ask_to_resolve: return results if len(results) > 1: # The other way the caller can specify it wants all results is by setting # allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern) if allow_mult and (all_mult or is_glob_pattern(entity_name)): return results if INTERACTIVE_CLI: print('The given path "' + path + '" resolves to the following data objects:') if any(['describe' not in result for result in results]): # findDataObject API call must be made to get 'describe' mappings project, folderpath, entity_name = resolve_path(path, expected='entity') results = _resolve_global_entity(project, folderpath, entity_name) choice = pick([get_ls_l_desc(result['describe']) for result in results], allow_mult=allow_mult) if allow_mult and choice == '*': return results else: return [results[choice]] if allow_mult else results[choice] else: raise ResolutionError('The given path "' + path + '" resolves to ' + str(len(results)) + ' data objects') else: return [results[0]] if allow_mult else results[0]
:param path: Path to the object that required resolution; propagated from command-line :type path: string :param entity_name: Name of the object :type entity_name: string :param results: Result of resolution; non-empty list of object specifications (each specification is a dictionary with keys "project" and "id") :type results: list of dictionaries :param allow_mult: If True, it is okay to choose from multiple results of a single resolved object, or return all results found; if False, raise an error if multiple results are found :type allow_mult: boolean :param all_mult: If True, return all results if multiple results are found for a single resolved object; if False, user needs to choose a single result if multiple are found; the value of all_mult only has an effect if allow_mult is True) :type all_mult: boolean :param ask_to_resolve: Whether picking may be necessary (if True, a list is returned; if False, only one result is returned); if specified as True, then all results will be returned, regardless of the values of allow_mult and all_mult :type ask_to_resolve: boolean :returns: The results of resolving entity_name, expected to be of the following form: <resolved_object> # If only one result is present or the user # is able to select from multiple OR [<resolved_object>, ...] # If multiple results are present and # it is allowed where <resolved_object> is of the following form: {"project": <project_id>, "id": <object_id>} :rtype: dict or list of dicts :raises: ValueError if results is empty :raises: ResolutionError if too many results are found and the user is not in interactive mode and cannot select one Precondition: results must be a nonempty list Validates length of results. If there are multiple results found and the user is in interactive mode, then the user will be prompted to select a single result to be returned.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L750-L828
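A self-contained sketch for this internal helper in the single-result case, which needs no API access; the result dictionary and the path are hypothetical.

from dxpy.utils.resolver import _validate_resolution_output_length

results = [{"project": "project-xxxx", "id": "file-xxxx"}]   # hypothetical resolution result

# A single result is returned directly, or wrapped in a list when allow_mult=True.
print(_validate_resolution_output_length("myproj:reads.fastq", "reads.fastq", results))
print(_validate_resolution_output_length("myproj:reads.fastq", "reads.fastq",
                                         results, allow_mult=True))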
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
_resolve_global_entity
def _resolve_global_entity(project_or_job_id, folderpath, entity_name, describe=True, visibility="either"): """ :param project_or_job_id: The project ID to which the entity belongs (then the entity is an existing data object), or the job ID to which the entity belongs (then the entity is a job-based object reference to an object that may not exist yet) :type project_or_job_id: string :param folderpath: Full path to the object (parsed from command line) :type folderpath: string :param entity_name: Name of the object :type entity_name: string :param describe: Input mapping used to describe the job's project if project_or_job_id is a job ID, or True if the input mapping is to be empty :type describe: dict or True :param visibility: The expected visibility of the entity ("either", "hidden", or "visible"); to be used in resolution :type visibility: string :returns: The results obtained from attempting to resolve the entity; the expected format of the return value is described below :rtype: list :raises: ResolutionError if dxpy.find_data_objects throws an error If project_or_job_id is a job ID, then return value will be like: [{"id": ..., "describe": {...}}, ...] Otherwise, the return value will be like: [{"id": ..., "project": ..., "describe": {...}}, ...] Note that if the entity is successfully resolved, then the "describe" key will be in the dictionary if and only if a nonempty describe mapping was provided. TODO: Inspect entity_name and conditionally treat it as a "glob" pattern. TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive """ if is_job_id(project_or_job_id): if describe is True: describe = {} # The following function call will raise a ResolutionError if no results # could be found. # If the call is successful, then the project will be incorporated into the # "describe" mapping of the returned dictionaries. return resolve_job_ref(project_or_job_id, entity_name, describe=describe) else: try: return list(dxpy.find_data_objects(project=project_or_job_id, folder=folderpath, name=entity_name, name_mode='glob', recurse=False, describe=describe, visibility=visibility)) except Exception as details: raise ResolutionError(str(details))
python
def _resolve_global_entity(project_or_job_id, folderpath, entity_name, describe=True, visibility="either"): """ :param project_or_job_id: The project ID to which the entity belongs (then the entity is an existing data object), or the job ID to which the entity belongs (then the entity is a job-based object reference to an object that may not exist yet) :type project_or_job_id: string :param folderpath: Full path to the object (parsed from command line) :type folderpath: string :param entity_name: Name of the object :type entity_name: string :param describe: Input mapping used to describe the job's project if project_or_job_id is a job ID, or True if the input mapping is to be empty :type describe: dict or True :param visibility: The expected visibility of the entity ("either", "hidden", or "visible"); to be used in resolution :type visibility: string :returns: The results obtained from attempting to resolve the entity; the expected format of the return value is described below :rtype: list :raises: ResolutionError if dxpy.find_data_objects throws an error If project_or_job_id is a job ID, then return value will be like: [{"id": ..., "describe": {...}}, ...] Otherwise, the return value will be like: [{"id": ..., "project": ..., "describe": {...}}, ...] Note that if the entity is successfully resolved, then the "describe" key will be in the dictionary if and only if a nonempty describe mapping was provided. TODO: Inspect entity_name and conditionally treat it as a "glob" pattern. TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive """ if is_job_id(project_or_job_id): if describe is True: describe = {} # The following function call will raise a ResolutionError if no results # could be found. # If the call is successful, then the project will be incorporated into the # "describe" mapping of the returned dictionaries. return resolve_job_ref(project_or_job_id, entity_name, describe=describe) else: try: return list(dxpy.find_data_objects(project=project_or_job_id, folder=folderpath, name=entity_name, name_mode='glob', recurse=False, describe=describe, visibility=visibility)) except Exception as details: raise ResolutionError(str(details))
:param project_or_job_id: The project ID to which the entity belongs (then the entity is an existing data object), or the job ID to which the entity belongs (then the entity is a job-based object reference to an object that may not exist yet) :type project_or_job_id: string :param folderpath: Full path to the object (parsed from command line) :type folderpath: string :param entity_name: Name of the object :type entity_name: string :param describe: Input mapping used to describe the job's project if project_or_job_id is a job ID, or True if the input mapping is to be empty :type describe: dict or True :param visibility: The expected visibility of the entity ("either", "hidden", or "visible"); to be used in resolution :type visibility: string :returns: The results obtained from attempting to resolve the entity; the expected format of the return value is described below :rtype: list :raises: ResolutionError if dxpy.find_data_objects throws an error If project_or_job_id is a job ID, then return value will be like: [{"id": ..., "describe": {...}}, ...] Otherwise, the return value will be like: [{"id": ..., "project": ..., "describe": {...}}, ...] Note that if the entity is successfully resolved, then the "describe" key will be in the dictionary if and only if a nonempty describe mapping was provided. TODO: Inspect entity_name and conditionally treat it as a "glob" pattern. TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L831-L888
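A minimal sketch of calling this internal helper directly, assuming an authenticated dxpy session; the project ID and file pattern below are hypothetical placeholders, and most callers would reach this code through the public resolvers further on rather than importing the underscore-prefixed function:

from dxpy.utils.resolver import _resolve_global_entity, ResolutionError

try:
    # Find visible objects named like "reads*.fastq.gz" directly under /data
    # of a (hypothetical) project; recursion is off and the name is a glob.
    results = _resolve_global_entity("project-xxxx", "/data", "reads*.fastq.gz",
                                     visibility="visible")
    for result in results:
        print(result["project"], result["id"], result["describe"]["name"])
except ResolutionError as err:
    print("could not resolve:", err)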
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
_format_resolution_output
def _format_resolution_output(path, project, folderpath, entity_name, result): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param result: The result of resolving entity_name :type result: list of dictionaries :returns: The validated resolution output :rtype: dictionary Formats the output from the resolution of entity_name based on the number of resolved entities. If no results are found and entity_name can be resolved to a folder, then the return value will look like: {"project": <project>, "folder": <folder>, "name": None} If exactly one result is found, then the return value will look like: {"project": <project>, "folder": <folder>, "name": {"id": <id>, "project": <project>}} OR {"project": None, "folder": <folder>, "name": {"id": <id>, "project": <project>}} Else, the return value will look like: {"project": None, "folder": None, "name": None} """ try: if len(result) == 0: folder = _resolve_folder(project, folderpath, entity_name) return {"project": project, "folder": folder, "name": None} else: validated_results = _validate_resolution_output_length(path, entity_name, result) return {"project": None if is_job_id(project) else project, "folder": None, "name": validated_results} except ResolutionError: return {"project": None, "folder": None, "name": None}
python
def _format_resolution_output(path, project, folderpath, entity_name, result): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param result: The result of resolving entity_name :type result: list of dictionaries :returns: The validated resolution output :rtype: dictionary Formats the output from the resolution of entity_name based on the number of resolved entities. If no results are found and entity_name can be resolved to a folder, then the return value will look like: {"project": <project>, "folder": <folder>, "name": None} If exactly one result is found, then the return value will look like: {"project": <project>, "folder": <folder>, "name": {"id": <id>, "project": <project>}} OR {"project": None, "folder": <folder>, "name": {"id": <id>, "project": <project>}} Else, the return value will look like: {"project": None, "folder": None, "name": None} """ try: if len(result) == 0: folder = _resolve_folder(project, folderpath, entity_name) return {"project": project, "folder": folder, "name": None} else: validated_results = _validate_resolution_output_length(path, entity_name, result) return {"project": None if is_job_id(project) else project, "folder": None, "name": validated_results} except ResolutionError: return {"project": None, "folder": None, "name": None}
:param path: Path to the object that required resolution; propagated from command-line :type path: string :param project: The potential project the entity belongs to :type project: string :param folderpath: Path to the entity :type folderpath: string :param entity_name: The name of the entity :type entity_name: string :param result: The result of resolving entity_name :type result: list of dictionaries :returns: The validated resolution output :rtype: dictionary Formats the output from the resolution of entity_name based on the number of resolved entities. If no results are found and entity_name can be resolved to a folder, then the return value will look like: {"project": <project>, "folder": <folder>, "name": None} If exactly one result is found, then the return value will look like: {"project": <project>, "folder": <folder>, "name": {"id": <id>, "project": <project>}} OR {"project": None, "folder": <folder>, "name": {"id": <id>, "project": <project>}} Else, the return value will look like: {"project": None, "folder": None, "name": None}
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L891-L933
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_multiple_existing_paths
def resolve_multiple_existing_paths(paths): """ :param paths: A list of paths to items that need to be resolved :type paths: list :returns: A dictionary mapping a specified path to either its resolved object or Nones, if the object could not be resolved :rtype: dict For each input given in paths, attempts to resolve the path, and returns the resolved object in a dictionary. The return value will look like: {<path1>: <resolved_object1>, <path2>: <resolved_object2>,...} If entity_id is a DX ID that can be described, <resolved_object*> ::= {"project": None, "folder": None, "name": {"id": <id>, "describe": <describe_output>}} Else if a general resolution (or search) method will be used to resolve the entity, <resolved_object*> ::= {"project": <project>, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if <project> is a job ID, <resolved_object*> ::= {"project": None, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if the path refers to a folder instead of a data object, <resolved_object*> ::= {"project": <project>, "folder": <folder>, "name": None} Else if description or resolution fails, <resolved_object*> ::= {"project": None, "folder": None, "name": None} """ done_objects = {} # Return value to_resolve_in_batch_paths = [] # Paths to resolve to_resolve_in_batch_inputs = [] # Project, folderpath, and entity name for path in paths: project, folderpath, entity_name = resolve_path(path, expected='entity') try: must_resolve, project, folderpath, entity_name = _check_resolution_needed( path, project, folderpath, entity_name) except: must_resolve = False if must_resolve: if is_glob_pattern(entity_name): # TODO: Must call findDataObjects because resolveDataObjects does not support glob patterns try: find_results = _resolve_global_entity(project, folderpath, entity_name) done_objects[path] = _format_resolution_output(path, project, folderpath, entity_name, find_results) except ResolutionError: # Catches any ResolutionError thrown by _resolve_global_entity done_objects[path] = {"project": None, "folder": None, "name": None} else: # Prepare batch call for resolveDataObjects to_resolve_in_batch_paths.append(path) to_resolve_in_batch_inputs.append({"project": project, "folder": folderpath, "name": entity_name}) else: # No need to resolve done_objects[path] = {"project": project, "folder": folderpath, "name": entity_name} # Call resolveDataObjects resolution_results = dxpy.resolve_data_objects(to_resolve_in_batch_inputs) for path, inputs, result in zip(to_resolve_in_batch_paths, to_resolve_in_batch_inputs, resolution_results): done_objects[path] = _format_resolution_output(path, inputs["project"], inputs["folder"], inputs["name"], result) return done_objects
python
def resolve_multiple_existing_paths(paths): """ :param paths: A list of paths to items that need to be resolved :type paths: list :returns: A dictionary mapping a specified path to either its resolved object or Nones, if the object could not be resolved :rtype: dict For each input given in paths, attempts to resolve the path, and returns the resolved object in a dictionary. The return value will look like: {<path1>: <resolved_object1>, <path2>: <resolved_object2>,...} If entity_id is a DX ID that can be described, <resolved_object*> ::= {"project": None, "folder": None, "name": {"id": <id>, "describe": <describe_output>}} Else if a general resolution (or search) method will be used to resolve the entity, <resolved_object*> ::= {"project": <project>, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if <project> is a job ID, <resolved_object*> ::= {"project": None, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if the path refers to a folder instead of a data object, <resolved_object*> ::= {"project": <project>, "folder": <folder>, "name": None} Else if description or resolution fails, <resolved_object*> ::= {"project": None, "folder": None, "name": None} """ done_objects = {} # Return value to_resolve_in_batch_paths = [] # Paths to resolve to_resolve_in_batch_inputs = [] # Project, folderpath, and entity name for path in paths: project, folderpath, entity_name = resolve_path(path, expected='entity') try: must_resolve, project, folderpath, entity_name = _check_resolution_needed( path, project, folderpath, entity_name) except: must_resolve = False if must_resolve: if is_glob_pattern(entity_name): # TODO: Must call findDataObjects because resolveDataObjects does not support glob patterns try: find_results = _resolve_global_entity(project, folderpath, entity_name) done_objects[path] = _format_resolution_output(path, project, folderpath, entity_name, find_results) except ResolutionError: # Catches any ResolutionError thrown by _resolve_global_entity done_objects[path] = {"project": None, "folder": None, "name": None} else: # Prepare batch call for resolveDataObjects to_resolve_in_batch_paths.append(path) to_resolve_in_batch_inputs.append({"project": project, "folder": folderpath, "name": entity_name}) else: # No need to resolve done_objects[path] = {"project": project, "folder": folderpath, "name": entity_name} # Call resolveDataObjects resolution_results = dxpy.resolve_data_objects(to_resolve_in_batch_inputs) for path, inputs, result in zip(to_resolve_in_batch_paths, to_resolve_in_batch_inputs, resolution_results): done_objects[path] = _format_resolution_output(path, inputs["project"], inputs["folder"], inputs["name"], result) return done_objects
:param paths: A list of paths to items that need to be resolved :type paths: list :returns: A dictionary mapping a specified path to either its resolved object or Nones, if the object could not be resolved :rtype: dict For each input given in paths, attempts to resolve the path, and returns the resolved object in a dictionary. The return value will look like: {<path1>: <resolved_object1>, <path2>: <resolved_object2>,...} If entity_id is a DX ID that can be described, <resolved_object*> ::= {"project": None, "folder": None, "name": {"id": <id>, "describe": <describe_output>}} Else if a general resolution (or search) method will be used to resolve the entity, <resolved_object*> ::= {"project": <project>, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if <project> is a job ID, <resolved_object*> ::= {"project": None, "folder": None, "name": {"project": <project>, "id": <resolved_id>}} Else if the path refers to a folder instead of a data object, <resolved_object*> ::= {"project": <project>, "folder": <folder>, "name": None} Else if description or resolution fails, <resolved_object*> ::= {"project": None, "folder": None, "name": None}
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L936-L1012
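A minimal sketch of batch resolution, assuming an authenticated dxpy session; the project name and paths are hypothetical placeholders. Note that the glob pattern is routed through findDataObjects while the plain name goes through the batched resolveDataObjects call:

from dxpy.utils.resolver import resolve_multiple_existing_paths

paths = ["my-project:/data/reads_1.fastq.gz", "my-project:/data/*.bam"]
resolved = resolve_multiple_existing_paths(paths)
for path, result in resolved.items():
    if result["project"] is None and result["folder"] is None and result["name"] is None:
        print(path, "-> could not be resolved")
    else:
        print(path, "->", result)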
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_existing_path
def resolve_existing_path(path, expected=None, ask_to_resolve=True, expected_classes=None, allow_mult=False, describe=True, all_mult=False, allow_empty_string=True, visibility="either"): ''' :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :param ask_to_resolve: Whether picking may be necessary (if true, a list is returned; if false, only one result is returned) :type ask_to_resolve: boolean :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param allow_mult: Whether to allow the user to select multiple results from the same path :type allow_mult: boolean :param describe: Input hash to describe call for the results, or True if no describe input is to be provided :type describe: dict or True :param all_mult: Whether to return all matching results without prompting (only applicable if allow_mult == True) :type all_mult: boolean :returns: A LIST of results when ask_to_resolve is False or allow_mult is True :raises: :exc:`ResolutionError` if the request path was invalid, or a single result was requested and input is not a TTY :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean :param visibility: The visibility expected ("either", "hidden", or "visible") :type visibility: string Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). TODO: Always treats the path as a glob pattern. Output is of the form {"id": id, "describe": describe hash} a list of those TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive NOTE: if expected_classes is provided and conflicts with the class of the hash ID, it will return None for all fields. ''' project, folderpath, entity_name = resolve_path(path, expected=expected, allow_empty_string=allow_empty_string) must_resolve, project, folderpath, entity_name = _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=expected_classes, describe=describe, enclose_in_list=(not ask_to_resolve or allow_mult)) if must_resolve: results = _resolve_global_entity(project, folderpath, entity_name, describe=describe, visibility=visibility) if len(results) == 0: # Could not resolve entity, so it is probably a folder folder = _resolve_folder(project, folderpath, entity_name) return project, folder, None else: validated_results = _validate_resolution_output_length(path, entity_name, results, allow_mult=allow_mult, all_mult=all_mult, ask_to_resolve=ask_to_resolve) if is_job_id(project): return None, None, validated_results return project, None, validated_results return project, folderpath, entity_name
python
def resolve_existing_path(path, expected=None, ask_to_resolve=True, expected_classes=None, allow_mult=False, describe=True, all_mult=False, allow_empty_string=True, visibility="either"): ''' :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :param ask_to_resolve: Whether picking may be necessary (if true, a list is returned; if false, only one result is returned) :type ask_to_resolve: boolean :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param allow_mult: Whether to allow the user to select multiple results from the same path :type allow_mult: boolean :param describe: Input hash to describe call for the results, or True if no describe input is to be provided :type describe: dict or True :param all_mult: Whether to return all matching results without prompting (only applicable if allow_mult == True) :type all_mult: boolean :returns: A LIST of results when ask_to_resolve is False or allow_mult is True :raises: :exc:`ResolutionError` if the request path was invalid, or a single result was requested and input is not a TTY :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean :param visibility: The visibility expected ("either", "hidden", or "visible") :type visibility: string Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). TODO: Always treats the path as a glob pattern. Output is of the form {"id": id, "describe": describe hash} a list of those TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive NOTE: if expected_classes is provided and conflicts with the class of the hash ID, it will return None for all fields. ''' project, folderpath, entity_name = resolve_path(path, expected=expected, allow_empty_string=allow_empty_string) must_resolve, project, folderpath, entity_name = _check_resolution_needed(path, project, folderpath, entity_name, expected_classes=expected_classes, describe=describe, enclose_in_list=(not ask_to_resolve or allow_mult)) if must_resolve: results = _resolve_global_entity(project, folderpath, entity_name, describe=describe, visibility=visibility) if len(results) == 0: # Could not resolve entity, so it is probably a folder folder = _resolve_folder(project, folderpath, entity_name) return project, folder, None else: validated_results = _validate_resolution_output_length(path, entity_name, results, allow_mult=allow_mult, all_mult=all_mult, ask_to_resolve=ask_to_resolve) if is_job_id(project): return None, None, validated_results return project, None, validated_results return project, folderpath, entity_name
:param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either :type expected: string or None :param ask_to_resolve: Whether picking may be necessary (if true, a list is returned; if false, only one result is returned) :type ask_to_resolve: boolean :param expected_classes: A list of expected classes the entity is allowed to belong to if it is an ID (e.g. "record", "file", "job"); if None, then entity_name may be any data object class :type expected_classes: list or None :param allow_mult: Whether to allow the user to select multiple results from the same path :type allow_mult: boolean :param describe: Input hash to describe call for the results, or True if no describe input is to be provided :type describe: dict or True :param all_mult: Whether to return all matching results without prompting (only applicable if allow_mult == True) :type all_mult: boolean :returns: A LIST of results when ask_to_resolve is False or allow_mult is True :raises: :exc:`ResolutionError` if the request path was invalid, or a single result was requested and input is not a TTY :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior. :type allow_empty_string: boolean :param visibility: The visibility expected ("either", "hidden", or "visible") :type visibility: string Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). TODO: Always treats the path as a glob pattern. Output is of the form {"id": id, "describe": describe hash} a list of those TODO: Callers should specify exactly what fields they want, and then hopefully we can avoid having a default set of fields that may be very expensive NOTE: if expected_classes is provided and conflicts with the class of the hash ID, it will return None for all fields.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1015-L1084
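A minimal sketch, assuming an authenticated dxpy session; the path is a hypothetical placeholder. With allow_mult=True and all_mult=True every match is returned without prompting, and a path that only matches a folder comes back with the entity part set to None:

from dxpy.utils.resolver import resolve_existing_path, ResolutionError

try:
    project, folder, results = resolve_existing_path(
        "my-project:/data/*.vcf.gz", allow_mult=True, all_mult=True)
    if results is None:
        print("resolved to folder", folder, "in project", project)
    else:
        for result in results:
            print(result["id"])
except ResolutionError as err:
    print("resolution failed:", err)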
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
check_folder_exists
def check_folder_exists(project, path, folder_name): ''' :param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :rtype: boolean :raises: :exc:`ResolutionError` if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/' ''' if folder_name is None or path is None: return False try: folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"}) except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': raise ResolutionError(str(e.msg)) else: raise e target_folder = path + '/' + folder_name # sanitize input if necessary target_folder, _skip = clean_folder_path(target_folder, 'folder') # Check that folder name exists in return from list folder API call return target_folder in folder_list['folders']
python
def check_folder_exists(project, path, folder_name): ''' :param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :rtype: boolean :raises: :exc:`ResolutionError` if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/' ''' if folder_name is None or path is None: return False try: folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"}) except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': raise ResolutionError(str(e.msg)) else: raise e target_folder = path + '/' + folder_name # sanitize input if necessary target_folder, _skip = clean_folder_path(target_folder, 'folder') # Check that folder name exists in return from list folder API call return target_folder in folder_list['folders']
:param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :rtype: boolean :raises: :exc:`ResolutionError` if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/'
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1087-L1118
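A minimal sketch, assuming an authenticated dxpy session and an existing parent folder; the project ID and folder names are hypothetical placeholders:

from dxpy.utils.resolver import check_folder_exists, ResolutionError

try:
    if check_folder_exists("project-xxxx", "/data", "fastq"):
        print("/data/fastq already exists")
    else:
        print("/data/fastq was not found")
except ResolutionError as err:
    # Raised if, for example, the parent folder /data itself does not exist.
    print(err)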
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
get_app_from_path
def get_app_from_path(path): ''' :param path: A string to attempt to resolve to an app object :type path: string :returns: The describe hash of the app object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to an app object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here] ''' alias = None if not path.startswith('app-'): path = 'app-' + path if '/' in path: alias = path[path.find('/') + 1:] path = path[:path.find('/')] try: return dxpy.api.app_describe(path, alias=alias) except dxpy.DXAPIError: return None
python
def get_app_from_path(path): ''' :param path: A string to attempt to resolve to an app object :type path: string :returns: The describe hash of the app object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to an app object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here] ''' alias = None if not path.startswith('app-'): path = 'app-' + path if '/' in path: alias = path[path.find('/') + 1:] path = path[:path.find('/')] try: return dxpy.api.app_describe(path, alias=alias) except dxpy.DXAPIError: return None
:param path: A string to attempt to resolve to an app object :type path: string :returns: The describe hash of the app object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to an app object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here]
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1120-L1142
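A minimal sketch, assuming an authenticated dxpy session; the app name and version are illustrative and may not be accessible from a given account (a None return simply means no accessible app matched):

from dxpy.utils.resolver import get_app_from_path

for path in ("bwa_mem_fastq_read_mapper", "app-bwa_mem_fastq_read_mapper/1.0.0"):
    desc = get_app_from_path(path)
    if desc is None:
        print(path, "-> no accessible app found")
    else:
        print(path, "->", desc["id"], desc.get("version"))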
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
get_global_workflow_from_path
def get_global_workflow_from_path(path): ''' :param path: A string to attempt to resolve to a global workflow object :type path: string :returns: The describe hash of the global workflow object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to a global workflow object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here] ''' alias = None if not path.startswith('globalworkflow-'): path = 'globalworkflow-' + path if '/' in path: alias = path[path.find('/') + 1:] path = path[:path.find('/')] try: return dxpy.api.global_workflow_describe(path, alias=alias) except dxpy.DXAPIError: return None
python
def get_global_workflow_from_path(path): ''' :param path: A string to attempt to resolve to a global workflow object :type path: string :returns: The describe hash of the global workflow object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to a global workflow object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here] ''' alias = None if not path.startswith('globalworkflow-'): path = 'globalworkflow-' + path if '/' in path: alias = path[path.find('/') + 1:] path = path[:path.find('/')] try: return dxpy.api.global_workflow_describe(path, alias=alias) except dxpy.DXAPIError: return None
:param path: A string to attempt to resolve to a global workflow object :type path: string :returns: The describe hash of the global workflow object if found, or None otherwise :rtype: dict or None This method parses a string that is expected to perhaps refer to a global workflow object. If found, its describe hash will be returned. For more information on the contents of this hash, see the API documentation. [TODO: external link here]
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1144-L1167
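A minimal sketch, assuming an authenticated dxpy session; the global workflow name and version are hypothetical placeholders:

from dxpy.utils.resolver import get_global_workflow_from_path

desc = get_global_workflow_from_path("my_pipeline/1.0.0")
if desc is None:
    print("no accessible global workflow found")
else:
    print(desc["id"], desc.get("version"))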
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_global_executable
def resolve_global_executable(path, is_version_required=False): """ :param path: A string which is supposed to identify a global executable (app or workflow) :type path: string :param is_version_required: If set to True, the path has to specify a specific version/alias, e.g. "myapp/1.0.0" :type is_version_required: boolean :returns: The describe hash of the global executable object (app or workflow) :raises: :exc:`ResolutionError` if it cannot be found *path* is expected to have one of the following forms: - hash ID, e.g. "globalworkflow-F85Z6bQ0xku1PKY6FjGQ011J", "app-FBZ3f200yfzkKYyp9JkFVQ97" - named ID, e.g. "app-myapp", "globalworkflow-myworkflow" - named ID with alias (version or tag), e.g. "myapp/1.2.0", "myworkflow/1.2.0" - named ID with prefix and with alias (version or tag), e.g. "app-myapp/1.2.0", "globalworkflow-myworkflow/1.2.0" """ if not is_hashid(path) and is_version_required and "/" not in path: raise ResolutionError('Version is required, e.g. "myexec/1.0.0"'.format()) # First, check if the prefix is provided, then we don't have to resolve the name if path.startswith('app-'): return resolve_app(path) elif path.startswith('globalworkflow-'): return resolve_global_workflow(path) # If the path doesn't include a prefix, we must try describing # as an app and, if that fails, as a global workflow desc = get_app_from_path(path) if not desc: desc = get_global_workflow_from_path(path) if desc is None: raise ResolutionError( 'The given path "' + path + '" could not be resolved to an accessible global executable (app or workflow)') return desc
python
def resolve_global_executable(path, is_version_required=False): """ :param path: A string which is supposed to identify a global executable (app or workflow) :type path: string :param is_version_required: If set to True, the path has to specify a specific version/alias, e.g. "myapp/1.0.0" :type is_version_required: boolean :returns: The describe hash of the global executable object (app or workflow) :raises: :exc:`ResolutionError` if it cannot be found *path* is expected to have one of the following forms: - hash ID, e.g. "globalworkflow-F85Z6bQ0xku1PKY6FjGQ011J", "app-FBZ3f200yfzkKYyp9JkFVQ97" - named ID, e.g. "app-myapp", "globalworkflow-myworkflow" - named ID with alias (version or tag), e.g. "myapp/1.2.0", "myworkflow/1.2.0" - named ID with prefix and with alias (version or tag), e.g. "app-myapp/1.2.0", "globalworkflow-myworkflow/1.2.0" """ if not is_hashid(path) and is_version_required and "/" not in path: raise ResolutionError('Version is required, e.g. "myexec/1.0.0"'.format()) # First, check if the prefix is provided, then we don't have to resolve the name if path.startswith('app-'): return resolve_app(path) elif path.startswith('globalworkflow-'): return resolve_global_workflow(path) # If the path doesn't include a prefix, we must try describing # as an app and, if that fails, as a global workflow desc = get_app_from_path(path) if not desc: desc = get_global_workflow_from_path(path) if desc is None: raise ResolutionError( 'The given path "' + path + '" could not be resolved to an accessible global executable (app or workflow)') return desc
:param path: A string which is supposed to identify a global executable (app or workflow) :type path: string :param is_version_required: If set to True, the path has to specify a specific version/alias, e.g. "myapp/1.0.0" :type is_version_required: boolean :returns: The describe hash of the global executable object (app or workflow) :raises: :exc:`ResolutionError` if it cannot be found *path* is expected to have one of the following forms: - hash ID, e.g. "globalworkflow-F85Z6bQ0xku1PKY6FjGQ011J", "app-FBZ3f200yfzkKYyp9JkFVQ97" - named ID, e.g. "app-myapp", "globalworkflow-myworkflow" - named ID with alias (version or tag), e.g. "myapp/1.2.0", "myworkflow/1.2.0" - named ID with prefix and with alias (version or tag), e.g. "app-myapp/1.2.0", "globalworkflow-myworkflow/1.2.0"
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1220-L1253
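A minimal sketch, assuming an authenticated dxpy session; "my_pipeline" is a hypothetical executable name. Because no prefix is given, the name is tried first as an app and then as a global workflow, and is_version_required=True insists on the "name/version" form:

from dxpy.utils.resolver import resolve_global_executable, ResolutionError

try:
    desc = resolve_global_executable("my_pipeline/1.0.0", is_version_required=True)
    print(desc["class"], desc["id"])
except ResolutionError as err:
    print(err)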
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
resolve_to_objects_or_project
def resolve_to_objects_or_project(path, all_matching_results=False): ''' :param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). ''' # Attempt to resolve name project, folderpath, entity_results = resolve_existing_path(path, expected='entity', allow_mult=True, all_mult=all_matching_results) if entity_results is None and not is_container_id(path): if folderpath != None and folderpath != '/': raise ResolutionError('Could not resolve "' + path + \ '''" to an existing data object or to only a project; if you were attempting to refer to a project by name, please append a colon ":" to indicate that it is a project.''') return project, folderpath, entity_results
python
def resolve_to_objects_or_project(path, all_matching_results=False): ''' :param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). ''' # Attempt to resolve name project, folderpath, entity_results = resolve_existing_path(path, expected='entity', allow_mult=True, all_mult=all_matching_results) if entity_results is None and not is_container_id(path): if folderpath != None and folderpath != '/': raise ResolutionError('Could not resolve "' + path + \ '''" to an existing data object or to only a project; if you were attempting to refer to a project by name, please append a colon ":" to indicate that it is a project.''') return project, folderpath, entity_results
:param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error).
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1320-L1346
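A minimal sketch, assuming an authenticated dxpy session; the path is a hypothetical placeholder that may name either a data object or a project/folder:

from dxpy.utils.resolver import resolve_to_objects_or_project, ResolutionError

try:
    project, folder, results = resolve_to_objects_or_project(
        "my-project:/data/reads_1.fastq.gz", all_matching_results=True)
    if results is None:
        print("path refers to project", project, "folder", folder)
    else:
        for result in results:
            print(result["id"])
except ResolutionError as err:
    print(err)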
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxjob.py
new_dxjob
def new_dxjob(fn_input, fn_name, name=None, tags=None, properties=None, details=None, instance_type=None, depends_on=None, **kwargs): ''' :param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). Returns the :class:`~dxpy.bindings.dxjob.DXJob` handle for the job. Note that this function is shorthand for:: dxjob = DXJob() dxjob.new(fn_input, fn_name, **kwargs) .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`. .. note:: If the environment variable ``DX_JOB_ID`` is not set, this method assumes that it is running within the debug harness, executes the job in place, and provides a debug job handler object that does not have a corresponding remote API job object. ''' dxjob = DXJob() dxjob.new(fn_input, fn_name, name=name, tags=tags, properties=properties, details=details, instance_type=instance_type, depends_on=depends_on, **kwargs) return dxjob
python
def new_dxjob(fn_input, fn_name, name=None, tags=None, properties=None, details=None, instance_type=None, depends_on=None, **kwargs): ''' :param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). Returns the :class:`~dxpy.bindings.dxjob.DXJob` handle for the job. Note that this function is shorthand for:: dxjob = DXJob() dxjob.new(fn_input, fn_name, **kwargs) .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`. .. note:: If the environment variable ``DX_JOB_ID`` is not set, this method assumes that it is running within the debug harness, executes the job in place, and provides a debug job handler object that does not have a corresponding remote API job object. ''' dxjob = DXJob() dxjob.new(fn_input, fn_name, name=name, tags=tags, properties=properties, details=details, instance_type=instance_type, depends_on=depends_on, **kwargs) return dxjob
:param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). Returns the :class:`~dxpy.bindings.dxjob.DXJob` handle for the job. Note that this function is shorthand for:: dxjob = DXJob() dxjob.new(fn_input, fn_name, **kwargs) .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`. .. note:: If the environment variable ``DX_JOB_ID`` is not set, this method assumes that it is running within the debug harness, executes the job in place, and provides a debug job handler object that does not have a corresponding remote API job object.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L45-L90
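A minimal sketch of the scatter pattern this helper is typically used for, shown as a fragment of applet code; the entry-point names ("main", "process") and the input/output field names are hypothetical, and real subjobs are only launched when this runs inside an Execution Environment (outside one, the debug-harness behavior described above applies):

import dxpy

@dxpy.entry_point("process")
def process(chunk):
    # Hypothetical worker entry point; does the per-chunk work.
    return {"result": chunk}

@dxpy.entry_point("main")
def main(chunks):
    # Fan out one subjob per chunk of input.
    subjobs = [dxpy.new_dxjob({"chunk": chunk}, "process", name="process chunk")
               for chunk in chunks]
    # Collect the subjobs' outputs as job-based object references.
    return {"results": [subjob.get_output_ref("result") for subjob in subjobs]}

dxpy.run()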
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxjob.py
DXJob.new
def new(self, fn_input, fn_name, name=None, tags=None, properties=None, details=None, instance_type=None, depends_on=None, **kwargs): ''' :param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`. ''' final_depends_on = [] if depends_on is not None: if isinstance(depends_on, list): for item in depends_on: if isinstance(item, DXJob) or isinstance(item, DXDataObject): if item.get_id() is None: raise DXError('A dxpy handler given in depends_on does not have an ID set') final_depends_on.append(item.get_id()) elif isinstance(item, basestring): final_depends_on.append(item) else: raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings') else: raise DXError('Expected depends_on field to be a list') if 'DX_JOB_ID' in os.environ: req_input = {} req_input["input"] = fn_input req_input["function"] = fn_name if name is not None: req_input["name"] = name if tags is not None: req_input["tags"] = tags if properties is not None: req_input["properties"] = properties if instance_type is not None: req_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type, fn_name).as_dict() if depends_on is not None: req_input["dependsOn"] = final_depends_on if details is not None: req_input["details"] = details resp = dxpy.api.job_new(req_input, **kwargs) self.set_id(resp["id"]) else: self.set_id(queue_entry_point(function=fn_name, input_hash=fn_input, depends_on=final_depends_on, name=name))
python
def new(self, fn_input, fn_name, name=None, tags=None, properties=None, details=None, instance_type=None, depends_on=None, **kwargs): ''' :param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`. ''' final_depends_on = [] if depends_on is not None: if isinstance(depends_on, list): for item in depends_on: if isinstance(item, DXJob) or isinstance(item, DXDataObject): if item.get_id() is None: raise DXError('A dxpy handler given in depends_on does not have an ID set') final_depends_on.append(item.get_id()) elif isinstance(item, basestring): final_depends_on.append(item) else: raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings') else: raise DXError('Expected depends_on field to be a list') if 'DX_JOB_ID' in os.environ: req_input = {} req_input["input"] = fn_input req_input["function"] = fn_name if name is not None: req_input["name"] = name if tags is not None: req_input["tags"] = tags if properties is not None: req_input["properties"] = properties if instance_type is not None: req_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type, fn_name).as_dict() if depends_on is not None: req_input["dependsOn"] = final_depends_on if details is not None: req_input["details"] = details resp = dxpy.api.job_new(req_input, **kwargs) self.set_id(resp["id"]) else: self.set_id(queue_entry_point(function=fn_name, input_hash=fn_input, depends_on=final_depends_on, name=name))
:param fn_input: Function input :type fn_input: dict :param fn_name: Name of the function to be called :type fn_name: string :param name: Name for the new job (default is "<parent job name>:<fn_name>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list Creates and enqueues a new job that will execute a particular function (from the same app or applet as the one the current job is running). .. note:: This method is intended for calls made from within already-executing jobs or apps. If it is called from outside of an Execution Environment, an exception will be thrown. To create new jobs from outside the Execution Environment, use :func:`dxpy.bindings.dxapplet.DXApplet.run` or :func:`dxpy.bindings.dxapp.DXApp.run`.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L104-L173
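A minimal sketch of the two-step form that new_dxjob wraps, assuming it runs inside an already-executing job; the entry-point name, inputs, tags, and properties are hypothetical:

import dxpy

subjob = dxpy.DXJob()
subjob.new({"chunk": 3}, "process",
           name="process chunk 3",
           tags=["scatter"],
           properties={"chunk_index": "3"})
print(subjob.get_id())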
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxjob.py
DXJob.set_id
def set_id(self, dxid): ''' :param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs) :type dxid: string Discards the currently stored ID and associates the handler with *dxid* ''' if dxid is not None: if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')): # localjob IDs (which do not follow the usual ID # syntax) should be allowed; otherwise, follow the # usual syntax checking verify_string_dxid(dxid, self._class) self._dxid = dxid
python
def set_id(self, dxid): ''' :param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs) :type dxid: string Discards the currently stored ID and associates the handler with *dxid* ''' if dxid is not None: if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')): # localjob IDs (which do not follow the usual ID # syntax) should be allowed; otherwise, follow the # usual syntax checking verify_string_dxid(dxid, self._class) self._dxid = dxid
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs) :type dxid: string Discards the currently stored ID and associates the handler with *dxid*
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L175-L188
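A minimal sketch; the job IDs are placeholders (the first is 24 alphanumeric characters after "job-" so it passes the usual format check), and the localjob form is the one produced by local runs:

import dxpy

job = dxpy.DXJob()
job.set_id("job-xxxxxxxxxxxxxxxxxxxxxxxx")   # placeholder; format is validated
print(job.get_id())
job.set_id("localjob-1")                     # localjob IDs skip the format check
print(job.get_id())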
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxjob.py
DXJob.describe
def describe(self, fields=None, io=None, **kwargs): """ :param fields: dict where the keys are field names that should be returned, and values should be set to True (by default, all fields are returned) :type fields: dict :param io: Include input and output fields in description; cannot be provided with *fields*; default is True if *fields* is not provided (deprecated) :type io: bool :returns: Description of the job :rtype: dict Returns a hash with key-value pairs containing information about the job, including its state and (optionally) its inputs and outputs, as described in the API documentation for the `/job-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/job-xxxx/describe>`_ method. """ if fields is not None and io is not None: raise DXError('DXJob.describe: cannot provide non-None values for both fields and io') describe_input = {} if fields is not None: describe_input['fields'] = fields if io is not None: describe_input['io'] = io self._desc = dxpy.api.job_describe(self._dxid, describe_input, **kwargs) return self._desc
python
def describe(self, fields=None, io=None, **kwargs): """ :param fields: dict where the keys are field names that should be returned, and values should be set to True (by default, all fields are returned) :type fields: dict :param io: Include input and output fields in description; cannot be provided with *fields*; default is True if *fields* is not provided (deprecated) :type io: bool :returns: Description of the job :rtype: dict Returns a hash with key-value pairs containing information about the job, including its state and (optionally) its inputs and outputs, as described in the API documentation for the `/job-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/job-xxxx/describe>`_ method. """ if fields is not None and io is not None: raise DXError('DXJob.describe: cannot provide non-None values for both fields and io') describe_input = {} if fields is not None: describe_input['fields'] = fields if io is not None: describe_input['io'] = io self._desc = dxpy.api.job_describe(self._dxid, describe_input, **kwargs) return self._desc
:param fields: dict where the keys are field names that should be returned, and values should be set to True (by default, all fields are returned) :type fields: dict :param io: Include input and output fields in description; cannot be provided with *fields*; default is True if *fields* is not provided (deprecated) :type io: bool :returns: Description of the job :rtype: dict Returns a hash with key-value pairs containing information about the job, including its state and (optionally) its inputs and outputs, as described in the API documentation for the `/job-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/job-xxxx/describe>`_ method.
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L190-L219
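A minimal sketch, assuming an authenticated dxpy session and an existing job; the job ID is a placeholder (24 alphanumeric characters after "job-"). Restricting fields keeps the describe call cheap:

import dxpy

job = dxpy.DXJob("job-xxxxxxxxxxxxxxxxxxxxxxxx")
desc = job.describe(fields={"state": True, "name": True})
print(desc["state"], desc.get("name"))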