Code | Summary
---|---|
Please provide a description of the function:def switches(self):
if not self.__switches:
self.__switches = Switches(self.__connection)
return self.__switches | [
"\n Gets the Switches API client.\n\n Returns:\n Switches:\n "
]
|
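All of the client accessors in the rows below follow the same lazy-initialization pattern: the API client is built on first access and then cached on the instance. A minimal, self-contained sketch of that pattern (the class and connection names are stand-ins, not the real OneView SDK classes):

class FakeSwitches:
    # stand-in for an API client such as Switches
    def __init__(self, connection):
        self.connection = connection

class ClientSketch:
    def __init__(self, connection):
        self.__connection = connection
        self.__switches = None  # created lazily on first access

    @property
    def switches(self):
        # build the client once, then return the cached instance on later accesses
        if not self.__switches:
            self.__switches = FakeSwitches(self.__connection)
        return self.__switches

client = ClientSketch(connection="fake-connection")
assert client.switches is client.switches  # the same cached object each time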
Please provide a description of the function:def roles(self):
if not self.__roles:
self.__roles = Roles(self.__connection)
return self.__roles | [
"\n Gets the Roles API client.\n\n Returns:\n Roles:\n "
]
|
Please provide a description of the function:def switch_types(self):
if not self.__switch_types:
self.__switch_types = SwitchTypes(self.__connection)
return self.__switch_types | [
"\n Gets the SwitchTypes API client.\n\n Returns:\n SwitchTypes:\n "
]
|
Please provide a description of the function:def logical_switches(self):
if not self.__logical_switches:
self.__logical_switches = LogicalSwitches(self.__connection)
return self.__logical_switches | [
"\n Gets the LogicalSwitches API client.\n\n Returns:\n LogicalSwitches:\n "
]
|
Please provide a description of the function:def tasks(self):
if not self.__tasks:
self.__tasks = Tasks(self.__connection)
return self.__tasks | [
"\n Gets the Tasks API client.\n\n Returns:\n Tasks:\n "
]
|
Please provide a description of the function:def enclosure_groups(self):
if not self.__enclosure_groups:
self.__enclosure_groups = EnclosureGroups(self.__connection)
return self.__enclosure_groups | [
"\n Gets the EnclosureGroups API client.\n\n Returns:\n EnclosureGroups:\n "
]
|
Please provide a description of the function:def logical_enclosures(self):
if not self.__logical_enclosures:
self.__logical_enclosures = LogicalEnclosures(self.__connection)
return self.__logical_enclosures | [
"\n Gets the LogicalEnclosures API client.\n\n Returns:\n LogicalEnclosures:\n "
]
|
Please provide a description of the function:def metric_streaming(self):
if not self.__metric_streaming:
self.__metric_streaming = MetricStreaming(self.__connection)
return self.__metric_streaming | [
"\n Gets the MetricStreaming API client.\n\n Returns:\n MetricStreaming:\n "
]
|
Please provide a description of the function:def interconnects(self):
if not self.__interconnects:
self.__interconnects = Interconnects(self.__connection)
return self.__interconnects | [
"\n Gets the Interconnects API client.\n\n Returns:\n Interconnects:\n "
]
|
Please provide a description of the function:def interconnect_link_topologies(self):
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies | [
"\n Gets the InterconnectLinkTopologies API client.\n\n Returns:\n InterconnectLinkTopologies:\n "
]
|
Please provide a description of the function:def sas_interconnect_types(self):
if not self.__sas_interconnect_types:
self.__sas_interconnect_types = SasInterconnectTypes(self.__connection)
return self.__sas_interconnect_types | [
"\n Gets the SasInterconnectTypes API client.\n\n Returns:\n SasInterconnectTypes:\n "
]
|
Please provide a description of the function:def internal_link_sets(self):
if not self.__internal_link_sets:
self.__internal_link_sets = InternalLinkSets(self.__connection)
return self.__internal_link_sets | [
"\n Gets the InternalLinkSets API client.\n\n Returns:\n InternalLinkSets:\n "
]
|
Please provide a description of the function:def logical_interconnect_groups(self):
if not self.__logical_interconnect_groups:
self.__logical_interconnect_groups = LogicalInterconnectGroups(
self.__connection)
return self.__logical_interconnect_groups | [
"\n Gets the LogicalInterconnectGroups API client.\n\n Returns:\n LogicalInterconnectGroups:\n "
]
|
Please provide a description of the function:def sas_logical_interconnects(self):
if not self.__sas_logical_interconnects:
self.__sas_logical_interconnects = SasLogicalInterconnects(self.__connection)
return self.__sas_logical_interconnects | [
"\n Gets the SasLogicalInterconnects API client.\n\n Returns:\n SasLogicalInterconnects:\n "
]
|
Please provide a description of the function:def logical_downlinks(self):
if not self.__logical_downlinks:
self.__logical_downlinks = LogicalDownlinks(
self.__connection)
return self.__logical_downlinks | [
"\n Gets the LogicalDownlinks API client.\n\n Returns:\n LogicalDownlinks:\n "
]
|
Please provide a description of the function:def power_devices(self):
if not self.__power_devices:
self.__power_devices = PowerDevices(self.__connection)
return self.__power_devices | [
"\n Gets the PowerDevices API client.\n\n Returns:\n PowerDevices:\n "
]
|
Please provide a description of the function:def unmanaged_devices(self):
if not self.__unmanaged_devices:
self.__unmanaged_devices = UnmanagedDevices(self.__connection)
return self.__unmanaged_devices | [
"\n Gets the Unmanaged Devices API client.\n\n Returns:\n UnmanagedDevices:\n "
]
|
Please provide a description of the function:def racks(self):
if not self.__racks:
self.__racks = Racks(self.__connection)
return self.__racks | [
"\n Gets the Racks API client.\n\n Returns:\n Racks:\n "
]
|
Please provide a description of the function:def san_managers(self):
if not self.__san_managers:
self.__san_managers = SanManagers(self.__connection)
return self.__san_managers | [
"\n Gets the SanManagers API client.\n\n Returns:\n SanManagers:\n "
]
|
Please provide a description of the function:def endpoints(self):
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection)
return self.__endpoints | [
"\n Gets the Endpoints API client.\n\n Returns:\n Endpoints:\n "
]
|
Please provide a description of the function:def storage_systems(self):
if not self.__storage_systems:
self.__storage_systems = StorageSystems(self.__connection)
return self.__storage_systems | [
"\n Gets the StorageSystems API client.\n\n Returns:\n StorageSystems:\n "
]
|
Please provide a description of the function:def storage_pools(self):
if not self.__storage_pools:
self.__storage_pools = StoragePools(self.__connection)
return self.__storage_pools | [
"\n Gets the StoragePools API client.\n\n Returns:\n StoragePools:\n "
]
|
Please provide a description of the function:def storage_volume_templates(self):
if not self.__storage_volume_templates:
self.__storage_volume_templates = StorageVolumeTemplates(self.__connection)
return self.__storage_volume_templates | [
"\n Gets the StorageVolumeTemplates API client.\n\n Returns:\n StorageVolumeTemplates:\n "
]
|
Please provide a description of the function:def storage_volume_attachments(self):
if not self.__storage_volume_attachments:
self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection)
return self.__storage_volume_attachments | [
"\n Gets the StorageVolumeAttachments API client.\n\n Returns:\n StorageVolumeAttachments:\n "
]
|
Please provide a description of the function:def firmware_drivers(self):
if not self.__firmware_drivers:
self.__firmware_drivers = FirmwareDrivers(self.__connection)
return self.__firmware_drivers | [
"\n Gets the FirmwareDrivers API client.\n\n Returns:\n FirmwareDrivers:\n "
]
|
Please provide a description of the function:def firmware_bundles(self):
if not self.__firmware_bundles:
self.__firmware_bundles = FirmwareBundles(self.__connection)
return self.__firmware_bundles | [
"\n Gets the FirmwareBundles API client.\n\n Returns:\n FirmwareBundles:\n "
]
|
Please provide a description of the function:def volumes(self):
if not self.__volumes:
self.__volumes = Volumes(self.__connection)
return self.__volumes | [
"\n Gets the Volumes API client.\n\n Returns:\n Volumes:\n "
]
|
Please provide a description of the function:def sas_logical_jbod_attachments(self):
if not self.__sas_logical_jbod_attachments:
self.__sas_logical_jbod_attachments = SasLogicalJbodAttachments(self.__connection)
return self.__sas_logical_jbod_attachments | [
"\n Gets the SAS Logical JBOD Attachments client.\n\n Returns:\n SasLogicalJbodAttachments:\n "
]
|
Please provide a description of the function:def managed_sans(self):
if not self.__managed_sans:
self.__managed_sans = ManagedSANs(self.__connection)
return self.__managed_sans | [
"\n Gets the Managed SANs API client.\n\n Returns:\n ManagedSANs:\n "
]
|
Please provide a description of the function:def migratable_vc_domains(self):
if not self.__migratable_vc_domains:
self.__migratable_vc_domains = MigratableVcDomains(self.__connection)
return self.__migratable_vc_domains | [
"\n Gets the VC Migration Manager API client.\n\n Returns:\n MigratableVcDomains:\n "
]
|
Please provide a description of the function:def sas_interconnects(self):
if not self.__sas_interconnects:
self.__sas_interconnects = SasInterconnects(self.__connection)
return self.__sas_interconnects | [
"\n Gets the SAS Interconnects API client.\n\n Returns:\n SasInterconnects:\n "
]
|
Please provide a description of the function:def sas_logical_interconnect_groups(self):
if not self.__sas_logical_interconnect_groups:
self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)
return self.__sas_logical_interconnect_groups | [
"\n Gets the SasLogicalInterconnectGroups API client.\n\n Returns:\n SasLogicalInterconnectGroups:\n "
]
|
Please provide a description of the function:def drive_enclosures(self):
if not self.__drive_enclures:
self.__drive_enclures = DriveEnclosures(self.__connection)
return self.__drive_enclures | [
"\n Gets the Drive Enclosures API client.\n\n Returns:\n DriveEnclosures:\n "
]
|
Please provide a description of the function:def sas_logical_jbods(self):
if not self.__sas_logical_jbods:
self.__sas_logical_jbods = SasLogicalJbods(self.__connection)
return self.__sas_logical_jbods | [
"\n Gets the SAS Logical JBODs API client.\n\n Returns:\n SasLogicalJbod:\n "
]
|
Please provide a description of the function:def labels(self):
if not self.__labels:
self.__labels = Labels(self.__connection)
return self.__labels | [
"\n Gets the Labels API client.\n\n Returns:\n Labels:\n "
]
|
Please provide a description of the function:def index_resources(self):
if not self.__index_resources:
self.__index_resources = IndexResources(self.__connection)
return self.__index_resources | [
"\n Gets the Index Resources API client.\n\n Returns:\n IndexResources:\n "
]
|
Please provide a description of the function:def alerts(self):
if not self.__alerts:
self.__alerts = Alerts(self.__connection)
return self.__alerts | [
"\n Gets the Alerts API client.\n\n Returns:\n Alerts:\n "
]
|
Please provide a description of the function:def events(self):
if not self.__events:
self.__events = Events(self.__connection)
return self.__events | [
"\n Gets the Events API client.\n\n Returns:\n Events:\n "
]
|
Please provide a description of the function:def os_deployment_servers(self):
if not self.__os_deployment_servers:
self.__os_deployment_servers = OsDeploymentServers(self.__connection)
return self.__os_deployment_servers | [
"\n Gets the Os Deployment Servers API client.\n\n Returns:\n OsDeploymentServers:\n "
]
|
Please provide a description of the function:def certificate_rabbitmq(self):
if not self.__certificate_rabbitmq:
self.__certificate_rabbitmq = CertificateRabbitMQ(self.__connection)
return self.__certificate_rabbitmq | [
"\n Gets the Certificate RabbitMQ API client.\n\n Returns:\n CertificateRabbitMQ:\n "
]
|
Please provide a description of the function:def users(self):
if not self.__users:
self.__users = Users(self.__connection)
return self.__users | [
"\n Gets the Users API client.\n\n Returns:\n Users:\n "
]
|
Please provide a description of the function:def appliance_device_read_community(self):
if not self.__appliance_device_read_community:
self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection)
return self.__appliance_device_read_community | [
"\n Gets the ApplianceDeviceReadCommunity API client.\n\n Returns:\n ApplianceDeviceReadCommunity:\n "
]
|
Please provide a description of the function:def appliance_device_snmp_v1_trap_destinations(self):
if not self.__appliance_device_snmp_v1_trap_destinations:
self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v1_trap_destinations | [
"\n Gets the ApplianceDeviceSNMPv1TrapDestinations API client.\n\n Returns:\n ApplianceDeviceSNMPv1TrapDestinations:\n "
]
|
Please provide a description of the function:def appliance_device_snmp_v3_trap_destinations(self):
if not self.__appliance_device_snmp_v3_trap_destinations:
self.__appliance_device_snmp_v3_trap_destinations = ApplianceDeviceSNMPv3TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v3_trap_destinations | [
"\n Gets the ApplianceDeviceSNMPv3TrapDestinations API client.\n\n Returns:\n ApplianceDeviceSNMPv3TrapDestinations:\n "
]
|
Please provide a description of the function:def appliance_device_snmp_v3_users(self):
if not self.__appliance_device_snmp_v3_users:
self.__appliance_device_snmp_v3_users = ApplianceDeviceSNMPv3Users(self.__connection)
return self.__appliance_device_snmp_v3_users | [
"\n Gets the ApplianceDeviceSNMPv3Users API client.\n\n Returns:\n ApplianceDeviceSNMPv3Users:\n "
]
|
Please provide a description of the function:def appliance_node_information(self):
if not self.__appliance_node_information:
self.__appliance_node_information = ApplianceNodeInformation(self.__connection)
return self.__appliance_node_information | [
"\n Gets the ApplianceNodeInformation API client.\n\n Returns:\n ApplianceNodeInformation:\n "
]
|
Please provide a description of the function:def appliance_time_and_locale_configuration(self):
if not self.__appliance_time_and_locale_configuration:
self.__appliance_time_and_locale_configuration = ApplianceTimeAndLocaleConfiguration(self.__connection)
return self.__appliance_time_and_locale_configuration | [
"\n Gets the ApplianceTimeAndLocaleConfiguration API client.\n\n Returns:\n ApplianceTimeAndLocaleConfiguration:\n "
]
|
Please provide a description of the function:def versions(self):
if not self.__versions:
self.__versions = Versions(self.__connection)
return self.__versions | [
"\n Gets the Version API client.\n\n Returns:\n Version:\n "
]
|
Please provide a description of the function:def backups(self):
if not self.__backups:
self.__backups = Backups(self.__connection)
return self.__backups | [
"\n Gets the Backup API client.\n\n Returns:\n Backups:\n "
]
|
Please provide a description of the function:def login_details(self):
if not self.__login_details:
self.__login_details = LoginDetails(self.__connection)
return self.__login_details | [
"\n Gets the login details\n\n Returns:\n List of login details\n "
]
|
Please provide a description of the function:def licenses(self):
if not self.__licenses:
self.__licenses = Licenses(self.__connection)
return self.__licenses | [
"\n Gets all the licenses\n Returns:\n List of licenses\n "
]
|
Please provide a description of the function:def create(self, data=None, uri=None, timeout=-1, force=True):
if not data:
data = {}
default_values = self._get_default_values()
for key, value in default_values.items():
if not data.get(key):
data[key] = value
resource_data = self._helper.create(data, uri, timeout, force=force)
new_resource = self.new(self._connection, resource_data)
return new_resource | [
"Makes a POST request to create a resource when a request body is required.\n\n Args:\n data: Additional fields can be passed to create the resource.\n uri: Resouce uri\n timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n force: Flag to force the operation\n Returns:\n Created resource.\n "
]
|
Please provide a description of the function:def update(self, data=None, timeout=-1, force=True):
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
self.data = self._helper.update(resource, uri, force, timeout)
return self | [
"Updates server profile template.\n\n Args:\n data: Data to update the resource.\n timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n force: Force the update operation.\n\n Returns:\n A dict with the updated resource data.\n "
]
|
Please provide a description of the function:def get_new_profile(self):
uri = "{}/new-profile".format(self.data["uri"])
return self._helper.do_get(uri) | [
"\n A profile object will be returned with the configuration based on this template. Specify the profile name and\n server hardware to assign. If template has any fiber channel connection (which is specified as bootable) but no\n boot target was defined, that connection will be instantiated as a non-bootable connection. So modify that\n connection to change it to bootable and to specify the boot target.\n\n Returns:\n dict: The server profile resource.\n "
]
|
Please provide a description of the function:def get_transformation(self, server_hardware_type_uri, enclosure_group_uri):
query_params = self.TRANSFORMATION_PATH.format(**locals())
uri = "{}{}".format(self.data["uri"], query_params)
return self._helper.do_get(uri) | [
"\n Transforms an existing profile template by supplying a new server hardware type and enclosure group or both.\n A profile template will be returned with a new configuration based on the capabilities of the supplied\n server hardware type and/or enclosure group. All configured connections will have their port assignments\n set to 'Auto.'\n The new profile template can subsequently be used in the update method, but is not guaranteed to pass\n validation. Any incompatibilities will be flagged when the transformed server profile template is submitted.\n\n Note:\n This method is available for API version 300 or later.\n\n Args:\n server_hardware_type_uri: The URI of the new server hardware type.\n enclosure_group_uri: The URI of the new enclosure group.\n\n Returns:\n dict: The server profile template resource.\n "
]
|
Please provide a description of the function:def get_available_networks(self, **kwargs):
query_string = '&'.join('{}={}'.format(key, value)
for key, value in kwargs.items() if value)
uri = self.URI + "{}?{}".format("/available-networks", query_string)
return self._helper.do_get(uri) | [
"\n Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available\n to a server profile template along with their respective ports. The scopeUris, serverHardwareTypeUri and\n enclosureGroupUri parameters should be specified to get the available networks for a new server profile template.\n The serverHardwareTypeUri, enclosureGroupUri, and profileTemplateUri should be specified to get available\n networks for an existing server profile template.\n The scopeUris parameter is ignored when the profileTemplateUri is specified.\n\n Args:\n enclosureGroupUri: The URI of the enclosure group is required when the serverHardwareTypeUri\n specifies a blade server.\n profileTemplateUri: If the URI of the server profile template is provided the list of available\n networks will include only networks that share a scope with the server profile template.\n scopeUris: An expression to restrict the resources returned according to the scopes\n to which they are assigned.\n serverHardwareTypeUri: If the server hardware type specifies a rack server, the list of\n available network includes all networks that are applicable for the specified server hardware type.\n If the server hardware type specifies a blade server, the enclosureGroupUri parameter must be\n specified, and the list of available networks includes all networks that are applicable for the\n specified server hardware type and all empty bays within the enclosure group that can support\n the specified server hardware type.\n view: The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.\n\n Returns:\n dict: Dictionary with available networks details.\n "
]
|
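The query string in get_available_networks is assembled by joining only the keyword arguments that have truthy values. A standalone illustration of that idiom (the argument values and base URI below are made up):

kwargs = {"serverHardwareTypeUri": "/rest/server-hardware-types/abc",
          "enclosureGroupUri": None,
          "view": "Ethernet"}
query_string = '&'.join('{}={}'.format(key, value)
                        for key, value in kwargs.items() if value)
# -> "serverHardwareTypeUri=/rest/server-hardware-types/abc&view=Ethernet"
uri = "/rest/server-profile-templates" + "{}?{}".format("/available-networks", query_string)
print(uri)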
Please provide a description of the function:def get_all_without_ethernet(self, start=0, count=-1, filter='', sort=''):
without_ethernet_client = ResourceClient(
self._connection, "/rest/logical-downlinks/withoutEthernet")
return without_ethernet_client.get_all(start, count, filter=filter, sort=sort) | [
"\n Gets a paginated collection of logical downlinks without ethernet. The collection is\n based on optional sorting and filtering and is constrained by start and count parameters.\n\n Args:\n start:\n The first item to return, using 0-based indexing.\n If not specified, the default is 0 - start with the first available item.\n count:\n The number of resources to return. A count of -1 requests all items.\n The actual number of items in the response might differ from the requested\n count if the sum of start and count exceeds the total number of items.\n filter (list or str):\n A general filter/query string to narrow the list of items returned. The\n default is no filter; all resources are returned.\n sort:\n The sort order of the returned data set. By default, the sort order is based\n on create time with the oldest entry first.\n\n Returns:\n dict\n "
]
|
Please provide a description of the function:def get_without_ethernet(self, id_or_uri):
uri = self._client.build_uri(id_or_uri) + "/withoutEthernet"
return self._client.get(uri) | [
"\n Gets the logical downlink with the specified ID without ethernet.\n\n Args:\n id_or_uri: Can be either the logical downlink id or the logical downlink uri.\n\n Returns:\n dict\n "
]
|
Please provide a description of the function:def update_firmware(self, firmware_information, force=False):
firmware_uri = "{}/firmware".format(self.data["uri"])
result = self._helper.update(firmware_information, firmware_uri, force=force)
self.refresh()
return result | [
"\n Installs firmware to the member interconnects of a SAS Logical Interconnect.\n\n Args:\n firmware_information: Options to install firmware to a SAS Logical Interconnect.\n force: If sets to true, the operation completes despite any problems with the network connectivy\n or the erros on the resource itself.\n Returns:\n dict: SAS Logical Interconnect Firmware.\n "
]
|
Please provide a description of the function:def get_firmware(self):
firmware_uri = "{}/firmware".format(self.data["uri"])
return self._helper.do_get(firmware_uri) | [
"\n Gets baseline firmware information for a SAS Logical Interconnect.\n\n Returns:\n dict: SAS Logical Interconnect Firmware.\n "
]
|
Please provide a description of the function:def update_compliance_all(self, information, timeout=-1):
uri = self.URI + "/compliance"
result = self._helper.update(information, uri, timeout=timeout)
return result | [
"\n Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is\n compared to the associated SAS Logical Interconnect group.\n\n Args:\n information: Can be either the resource ID or URI.\n timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation\n in OneView; it just stops waiting for its completion.\n\n Returns:\n dict: SAS Logical Interconnect.\n "
]
|
Please provide a description of the function:def replace_drive_enclosure(self, information):
uri = "{}/replaceDriveEnclosure".format(self.data["uri"])
result = self._helper.create(information, uri)
self.refresh()
return result | [
"\n When a drive enclosure has been physically replaced, initiate the replacement operation that enables the\n new drive enclosure to take over as a replacement for the prior drive enclosure. The request requires\n specification of both the serial numbers of the original drive enclosure and its replacement to be provided.\n\n Args:\n information: Options to replace the drive enclosure.\n\n Returns:\n dict: SAS Logical Interconnect.\n "
]
|
Please provide a description of the function:def update_configuration(self):
uri = "{}/configuration".format(self.data["uri"])
result = self._helper.update({}, uri)
self.refresh()
return result | [
"\n Asynchronously applies or re-applies the SAS Logical Interconnect configuration to all managed interconnects\n of a SAS Logical Interconnect.\n\n Returns:\n dict: SAS Logical Interconnect.\n "
]
|
Please provide a description of the function:def encode_multipart_formdata(self, fields, files, baseName, verbose=False):
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
if verbose is True:
print(('Encoding ' + baseName + ' for upload...'))
fin = self._open(files, 'rb')
fout = self._open(files + '.b64', 'wb')
fout.write(bytearray('--' + BOUNDARY + CRLF, 'utf-8'))
fout.write(bytearray('Content-Disposition: form-data'
'; name="file"; filename="' + baseName + '"' + CRLF, "utf-8"))
fout.write(bytearray('Content-Type: application/octet-stream' + CRLF,
'utf-8'))
fout.write(bytearray(CRLF, 'utf-8'))
shutil.copyfileobj(fin, fout)
fout.write(bytearray(CRLF, 'utf-8'))
fout.write(bytearray('--' + BOUNDARY + '--' + CRLF, 'utf-8'))
fout.write(bytearray(CRLF, 'utf-8'))
fout.close()
fin.close()
return content_type | [
"\n Fields is a sequence of (name, value) elements for regular form fields.\n Files is a sequence of (name, filename, value) elements for data\n to be uploaded as files\n\n Returns: (content_type, body) ready for httplib.HTTP instance\n "
]
|
Please provide a description of the function:def mutationhash(strings, nedit):
maxlen = max([len(string) for string in strings])
indexes = generate_idx(maxlen, nedit)
muthash = defaultdict(set)
for string in strings:
muthash[string].update([string])
for x in substitution_set(string, indexes):
muthash[x].update([string])
return muthash | [
"\n produce a hash with each key a nedit distance substitution for a set of\n strings. values of the hash is the set of strings the substitution could\n have come from\n "
]
|
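To see how mutationhash, substitution_set, valid_substitution and generate_idx cooperate for barcode error correction, here is a condensed, self-contained re-implementation for illustration only; note that mutate_string is not shown in this listing, so its behavior here is an assumption:

import itertools
from collections import defaultdict

def generate_idx(maxlen, nedit):
    ALPHABET = ["A", "C", "G", "T", "N"]
    return list(itertools.product(itertools.combinations(range(maxlen), nedit),
                                  *[ALPHABET for _ in range(nedit)]))

def mutate_string(string, index):
    # assumed helper: apply the substitutions described by one generate_idx item
    positions, bases = index[0], index[1:]
    chars = list(string)
    for pos, base in zip(positions, bases):
        chars[pos] = base
    return "".join(chars)

def mutationhash(strings, nedit):
    maxlen = max(len(s) for s in strings)
    muthash = defaultdict(set)
    for s in strings:
        muthash[s].add(s)
        for index in generate_idx(maxlen, nedit):
            if all(len(s) > pos for pos in index[0]):  # valid_substitution
                muthash[mutate_string(s, index)].add(s)
    return muthash

muthash = mutationhash({"ACGT", "TTTT"}, nedit=1)
print(muthash["ACGA"])  # {'ACGT'} - one substitution away from a known barcode
print(muthash["TCTT"])  # {'TTTT'}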
Please provide a description of the function:def substitution_set(string, indexes):
strlen = len(string)
return {mutate_string(string, x) for x in indexes if valid_substitution(strlen, x)} | [
"\n for a string, return a set of all possible substitutions\n "
]
|
Please provide a description of the function:def valid_substitution(strlen, index):
values = index[0]
return all([strlen > i for i in values]) | [
"\n skip performing substitutions that are outside the bounds of the string\n "
]
|
Please provide a description of the function:def generate_idx(maxlen, nedit):
ALPHABET = ["A", "C", "G", "T", "N"]
indexlists = []
ALPHABETS = [ALPHABET for x in range(nedit)]
return list(itertools.product(itertools.combinations(range(maxlen), nedit),
*ALPHABETS)) | [
"\n generate all possible nedit edits of a string. each item has the form\n ((index1, index2), 'A', 'G') for nedit=2\n index1 will be replaced by 'A', index2 by 'G'\n\n this covers all edits < nedit as well since some of the specified\n substitutions will not change the base\n "
]
|
Please provide a description of the function:def acgt_match(string):
search = re.compile(r'[^ACGT]').search
return not bool(search(string)) | [
"\n returns True if sting consist of only \"A \"C\" \"G\" \"T\"\n "
]
|
Please provide a description of the function:def stream_fastq(file_handler):
''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = '' | []
|
Please provide a description of the function:def read_fastq(filename):
if not filename:
return itertools.cycle((None,))
if filename == "-":
filename_fh = sys.stdin
elif filename.endswith('gz'):
if is_python3:
filename_fh = gzip.open(filename, mode='rt')
else:
filename_fh = BufferedReader(gzip.open(filename, mode='rt'))
else:
filename_fh = open(filename)
return stream_fastq(filename_fh) | [
"\n return a stream of FASTQ entries, handling gzipped and empty files\n "
]
|
Please provide a description of the function:def write_fastq(filename):
if filename:
if filename.endswith('gz'):
filename_fh = gzip.open(filename, mode='wb')
else:
filename_fh = open(filename, mode='w')
else:
filename_fh = None
return filename_fh | [
"\n return a handle for FASTQ writing, handling gzipped files\n "
]
|
Please provide a description of the function:def detect_alignment_annotations(queryalignment, tags=False):
annotations = set()
for k, v in BARCODEINFO.items():
if tags:
if queryalignment.has_tag(v.bamtag):
annotations.add(k)
else:
if v.readprefix in queryalignment.qname:
annotations.add(k)
return annotations | [
"\n detects the annotations present in a SAM file, inspecting either the\n tags or the query names and returns a set of annotations present\n "
]
|
Please provide a description of the function:def detect_fastq_annotations(fastq_file):
annotations = set()
queryread = tz.first(read_fastq(fastq_file))
for k, v in BARCODEINFO.items():
if v.readprefix in queryread:
annotations.add(k)
return annotations | [
"\n detects annotations preesent in a FASTQ file by examining the first read\n "
]
|
Please provide a description of the function:def construct_transformed_regex(annotations):
re_string = '.*'
if "cellular" in annotations:
re_string += ":CELL_(?P<CB>.*)"
if "molecular" in annotations:
re_string += ":UMI_(?P<MB>\w*)"
if "sample" in annotations:
re_string += ":SAMPLE_(?P<SB>\w*)"
if re_string == ".*":
logger.error("No annotation present on this file, aborting.")
sys.exit(1)
return re_string | [
"\n construct a regex that matches possible fields in a transformed file\n annotations is a set of which keys in BARCODEINFO are present in the file\n "
]
|
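The pattern built by construct_transformed_regex pulls the barcode fields back out of the read names that fastqtransform writes. For instance, with cellular and molecular annotations present (the read name below is made up):

import re

# pattern produced when {"cellular", "molecular"} are detected
re_string = '.*:CELL_(?P<CB>.*):UMI_(?P<MB>\\w*)'
match = re.search(re_string, "@NS500:42:HW3KJ:1:1101:1001:CELL_ACGTACGT:UMI_TTCCAA")
print(match.group("CB"))  # ACGTACGT
print(match.group("MB"))  # TTCCAA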
Please provide a description of the function:def fastqtransform(transform, fastq1, fastq2, fastq3, fastq4, keep_fastq_tags,
separate_cb, demuxed_cb, cores, fastq1out, fastq2out,
min_length):
''' Transform input reads to the tagcounts compatible read layout using
regular expressions as defined in a transform file. Outputs new format to
stdout.
'''
transform = json.load(open(transform))
options = _infer_transform_options(transform)
read_template = '{name}'
logger.info("Transforming %s." % fastq1)
if options.dual_index:
logger.info("Detected dual cellular indexes.")
if separate_cb:
read_template += ':CELL_{CB1}-{CB2}'
else:
read_template += ':CELL_{CB}'
elif options.triple_index:
logger.info("Detected triple cellular indexes.")
if separate_cb:
read_template += ':CELL_{CB1}-{CB2}-{CB3}'
else:
read_template += ':CELL_{CB}'
elif options.CB or demuxed_cb:
logger.info("Detected cellular barcodes.")
read_template += ':CELL_{CB}'
if options.MB:
logger.info("Detected UMI.")
read_template += ':UMI_{MB}'
if options.SB:
logger.info("Detected sample.")
read_template += ':SAMPLE_{SB}'
read_template += "{readnum}"
if keep_fastq_tags:
read_template += ' {fastqtag}'
read_template += '\n{seq}\n+\n{qual}\n'
paired = fastq1out and fastq2out
read1_regex = re.compile(transform['read1'])
read2_regex = re.compile(transform['read2']) if fastq2 else None
read3_regex = re.compile(transform['read3']) if fastq3 else None
read4_regex = re.compile(transform['read4']) if fastq4 else None
fastq_file1 = read_fastq(fastq1)
fastq_file2 = read_fastq(fastq2)
fastq_file3 = read_fastq(fastq3)
fastq_file4 = read_fastq(fastq4)
transform = partial(transformer, read1_regex=read1_regex,
read2_regex=read2_regex, read3_regex=read3_regex,
read4_regex=read4_regex, paired=paired)
fastq1out_fh = write_fastq(fastq1out)
fastq2out_fh = write_fastq(fastq2out)
p = multiprocessing.Pool(cores)
try :
zzip = itertools.izip
except AttributeError:
zzip = zip
chunks = tz.partition_all(10000, zzip(fastq_file1, fastq_file2, fastq_file3,
fastq_file4))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(transform, list(bigchunk)):
if paired:
for read1_dict, read2_dict in tz.partition(2, chunk):
if options.dual_index:
if not separate_cb:
read1_dict['CB'] = read1_dict['CB1'] + read1_dict['CB2']
read2_dict['CB'] = read2_dict['CB1'] + read2_dict['CB2']
if demuxed_cb:
read1_dict['CB'] = demuxed_cb
read2_dict['CB'] = demuxed_cb
# Deal with spaces in read names
if keep_fastq_tags:
name, tag = read1_dict['name'].split(' ')
read1_dict['name'] = name
read1_dict['fastqtag'] = tag
name, tag = read2_dict['name'].split(' ')
read2_dict['name'] = name
read2_dict['fastqtag'] = tag
else:
read1_dict['name'] = read1_dict['name'].partition(' ')[0]
read2_dict['name'] = read2_dict['name'].partition(' ')[0]
read1_dict = _extract_readnum(read1_dict)
read2_dict = _extract_readnum(read2_dict)
tooshort = (len(read1_dict['seq']) < min_length or
len(read2_dict['seq']) < min_length)
if not tooshort:
fastq1out_fh.write(read_template.format(**read1_dict))
fastq2out_fh.write(read_template.format(**read2_dict))
else:
for read1_dict in chunk:
if options.dual_index:
if not separate_cb:
read1_dict['CB'] = read1_dict['CB1'] + read1_dict['CB2']
if demuxed_cb:
read1_dict['CB'] = demuxed_cb
# Deal with spaces in read names
if keep_fastq_tags:
name, tag = read1_dict['name'].split(' ')
read1_dict['name'] = name
read1_dict['fastqtag'] = tag
else:
read1_dict['name'] = read1_dict['name'].partition(' ')[0]
read1_dict = _extract_readnum(read1_dict)
if len(read1_dict['seq']) >= min_length:
if fastq1out_fh:
fastq1out_fh.write(read_template.format(**read1_dict))
else:
sys.stdout.write(read_template.format(**read1_dict)) | []
|
Please provide a description of the function:def _infer_transform_options(transform):
TransformOptions = collections.namedtuple("TransformOptions",
['CB', 'dual_index', 'triple_index', 'MB', 'SB'])
CB = False
SB = False
MB = False
dual_index = False
triple_index = False
for rx in transform.values():
if not rx:
continue
if "CB1" in rx:
if "CB3" in rx:
triple_index = True
else:
dual_index = True
if "SB" in rx:
SB = True
if "CB" in rx:
CB = True
if "MB" in rx:
MB = True
return TransformOptions(CB=CB, dual_index=dual_index, triple_index=triple_index, MB=MB, SB=SB) | [
"\n figure out what transform options should be by examining the provided\n regexes for keywords\n "
]
|
Please provide a description of the function:def _extract_readnum(read_dict):
pat = re.compile(r"(?P<readnum>/\d+)$")
parts = pat.split(read_dict["name"])
if len(parts) == 3:
name, readnum, endofline = parts
read_dict["name"] = name
read_dict["readnum"] = readnum
else:
read_dict["readnum"] = ""
return read_dict | [
"Extract read numbers from old-style fastqs.\n\n Handles read 1 and 2 specifications where naming is\n readname/1 readname/2\n "
]
|
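The readnum extraction relies on re.split with a capturing group: a name ending in /1 or /2 splits into three parts, anything else is left alone with an empty readnum. Assuming the _extract_readnum above:

print(_extract_readnum({"name": "HWI-ST1001:1101:2345/1"}))
# {'name': 'HWI-ST1001:1101:2345', 'readnum': '/1'}
print(_extract_readnum({"name": "HWI-ST1001:1101:2345"}))
# {'name': 'HWI-ST1001:1101:2345', 'readnum': ''}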
Please provide a description of the function:def tagcount(sam, out, genemap, output_evidence_table, positional, minevidence,
cb_histogram, cb_cutoff, no_scale_evidence, subsample, sparse,
parse_tags, gene_tags):
''' Count up evidence for tagged molecules
'''
from pysam import AlignmentFile
from io import StringIO
import pandas as pd
from utils import weigh_evidence
logger.info('Reading optional files')
gene_map = None
if genemap:
with open(genemap) as fh:
try:
gene_map = dict(p.strip().split() for p in fh)
except ValueError:
logger.error('Incorrectly formatted gene_map, need to be tsv.')
sys.exit()
if positional:
tuple_template = '{0},{1},{2},{3}'
else:
tuple_template = '{0},{1},{3}'
if not cb_cutoff:
cb_cutoff = 0
if cb_histogram and cb_cutoff == "auto":
cb_cutoff = guess_depth_cutoff(cb_histogram)
cb_cutoff = int(cb_cutoff)
cb_hist = None
filter_cb = False
if cb_histogram:
cb_hist = pd.read_csv(cb_histogram, index_col=0, header=-1, squeeze=True, sep="\t")
total_num_cbs = cb_hist.shape[0]
cb_hist = cb_hist[cb_hist > cb_cutoff]
logger.info('Keeping {} out of {} cellular barcodes.'.format(cb_hist.shape[0], total_num_cbs))
filter_cb = True
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
if subsample:
logger.info('Creating reservoir of subsampled reads ({} per cell)'.format(subsample))
start_sampling = time.time()
reservoir = collections.defaultdict(list)
cb_hist_sampled = 0 * cb_hist
cb_obs = 0 * cb_hist
track = stream_bamfile(sam)
current_read = 'none_observed_yet'
for i, aln in enumerate(track):
if aln.qname == current_read:
continue
current_read = aln.qname
if parse_tags:
CB = aln.get_tag('CR')
else:
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB not in cb_hist.index:
continue
cb_obs[CB] += 1
if len(reservoir[CB]) < subsample:
reservoir[CB].append(i)
cb_hist_sampled[CB] += 1
else:
s = pd.np.random.randint(0, cb_obs[CB])
if s < subsample:
reservoir[CB][s] = i
index_filter = set(itertools.chain.from_iterable(reservoir.values()))
sam_file.close()
sampling_time = time.time() - start_sampling
logger.info('Sampling done - {:.3}s'.format(sampling_time))
evidence = collections.defaultdict(int)
logger.info('Tallying evidence')
start_tally = time.time()
sam_mode = 'r' if sam.endswith(".sam") else 'rb'
sam_file = AlignmentFile(sam, mode=sam_mode)
targets = [x["SN"] for x in sam_file.header["SQ"]]
track = sam_file.fetch(until_eof=True)
count = 0
unmapped = 0
kept = 0
nomatchcb = 0
current_read = 'none_observed_yet'
count_this_read = True
missing_transcripts = set()
for i, aln in enumerate(track):
if count and not count % 1000000:
logger.info("Processed %d alignments, kept %d." % (count, kept))
logger.info("%d were filtered for being unmapped." % unmapped)
if filter_cb:
logger.info("%d were filtered for not matching known barcodes."
% nomatchcb)
count += 1
if aln.is_unmapped:
unmapped += 1
continue
if gene_tags and not aln.has_tag('GX'):
unmapped += 1
continue
if aln.qname != current_read:
current_read = aln.qname
if subsample and i not in index_filter:
count_this_read = False
continue
else:
count_this_read = True
else:
if not count_this_read:
continue
if parse_tags:
CB = aln.get_tag('CR')
else:
match = parser_re.match(aln.qname)
CB = match.group('CB')
if filter_cb:
if CB not in cb_hist.index:
nomatchcb += 1
continue
if parse_tags:
MB = aln.get_tag('UM')
else:
MB = match.group('MB')
if gene_tags:
target_name = aln.get_tag('GX').split(',')[0]
else:
txid = sam_file.getrname(aln.reference_id)
if gene_map:
if txid in gene_map:
target_name = gene_map[txid]
else:
missing_transcripts.add(txid)
target_name = txid
else:
target_name = txid
e_tuple = tuple_template.format(CB, target_name, aln.pos, MB)
# Scale evidence by number of hits
if no_scale_evidence:
evidence[e_tuple] += 1.0
else:
evidence[e_tuple] += weigh_evidence(aln.tags)
kept += 1
tally_time = time.time() - start_tally
if missing_transcripts:
logger.warn('The following transcripts were missing gene_ids, so we added them as the transcript ids: %s' % str(missing_transcripts))
logger.info('Tally done - {:.3}s, {:,} alns/min'.format(tally_time, int(60. * count / tally_time)))
logger.info('Collapsing evidence')
logger.info('Writing evidence')
with tempfile.NamedTemporaryFile('w+t') as out_handle:
for key in evidence:
line = '{},{}\n'.format(key, evidence[key])
out_handle.write(line)
out_handle.flush()
out_handle.seek(0)
evidence_table = pd.read_csv(out_handle, header=None)
del evidence
evidence_query = 'evidence >= %f' % minevidence
if positional:
evidence_table.columns=['cell', 'gene', 'umi', 'pos', 'evidence']
collapsed = evidence_table.query(evidence_query).groupby(['cell', 'gene'])['umi', 'pos'].size()
else:
evidence_table.columns=['cell', 'gene', 'umi', 'evidence']
collapsed = evidence_table.query(evidence_query).groupby(['cell', 'gene'])['umi'].size()
expanded = collapsed.unstack().T
if gene_map:
# This Series is just for sorting the index
genes = pd.Series(index=set(gene_map.values()))
genes = genes.sort_index()
# Now genes is assigned to a DataFrame
genes = expanded.ix[genes.index]
elif gene_tags:
expanded.sort_index()
genes = expanded
else:
# make data frame have a complete accounting of transcripts
targets = pd.Series(index=set(targets))
targets = targets.sort_index()
expanded = expanded.reindex(targets.index.values, fill_value=0)
genes = expanded
genes.fillna(0, inplace=True)
genes = genes.astype(int)
genes.index.name = "gene"
logger.info('Output results')
if subsample:
cb_hist_sampled.to_csv('ss_{}_'.format(subsample) + os.path.basename(cb_histogram), sep='\t')
if output_evidence_table:
import shutil
buf.seek(0)
with open(output_evidence_table, 'w') as etab_fh:
shutil.copyfileobj(buf, etab_fh)
if sparse:
pd.Series(genes.index).to_csv(out + ".rownames", index=False, header=False)
pd.Series(genes.columns.values).to_csv(out + ".colnames", index=False, header=False)
with open(out, "w+b") as out_handle:
scipy.io.mmwrite(out_handle, scipy.sparse.csr_matrix(genes))
else:
genes.to_csv(out) | []
|
Please provide a description of the function:def fasttagcount(sam, out, genemap, positional, minevidence, cb_histogram,
cb_cutoff, subsample, parse_tags, gene_tags, umi_matrix):
''' Count up evidence for tagged molecules, this implementation assumes the
alignment file is coordinate sorted
'''
from pysam import AlignmentFile
from io import StringIO
import pandas as pd
from utils import weigh_evidence
if sam.endswith(".sam"):
logger.error("To use the fasttagcount subcommand, the alignment file must be a "
"coordinate sorted, indexed BAM file.")
sys.exit(1)
logger.info('Reading optional files')
gene_map = None
if genemap:
with open(genemap) as fh:
try:
gene_map = dict(p.strip().split() for p in fh)
except ValueError:
logger.error('Incorrectly formatted gene_map, need to be tsv.')
sys.exit()
if positional:
tuple_template = '{0},{1},{2},{3}'
else:
tuple_template = '{0},{1},{3}'
if not cb_cutoff:
cb_cutoff = 0
if cb_histogram and cb_cutoff == "auto":
cb_cutoff = guess_depth_cutoff(cb_histogram)
cb_cutoff = int(cb_cutoff)
cb_hist = None
filter_cb = False
if cb_histogram:
cb_hist = pd.read_csv(cb_histogram, index_col=0, header=-1, squeeze=True, sep="\t")
total_num_cbs = cb_hist.shape[0]
cb_hist = cb_hist[cb_hist > cb_cutoff]
logger.info('Keeping {} out of {} cellular barcodes.'.format(cb_hist.shape[0], total_num_cbs))
filter_cb = True
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
if subsample:
logger.info('Creating reservoir of subsampled reads ({} per cell)'.format(subsample))
start_sampling = time.time()
reservoir = collections.defaultdict(list)
cb_hist_sampled = 0 * cb_hist
cb_obs = 0 * cb_hist
track = stream_bamfile(sam)
current_read = 'none_observed_yet'
for i, aln in enumerate(track):
if aln.qname == current_read:
continue
current_read = aln.qname
if parse_tags:
CB = aln.get_tag('CR')
else:
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB not in cb_hist.index:
continue
cb_obs[CB] += 1
if len(reservoir[CB]) < subsample:
reservoir[CB].append(i)
cb_hist_sampled[CB] += 1
else:
s = pd.np.random.randint(0, cb_obs[CB])
if s < subsample:
reservoir[CB][s] = i
index_filter = set(itertools.chain.from_iterable(reservoir.values()))
sam_file.close()
sampling_time = time.time() - start_sampling
logger.info('Sampling done - {:.3}s'.format(sampling_time))
evidence = collections.defaultdict(lambda: collections.defaultdict(float))
bare_evidence = collections.defaultdict(float)
logger.info('Tallying evidence')
start_tally = time.time()
sam_mode = 'r' if sam.endswith(".sam") else 'rb'
sam_file = AlignmentFile(sam, mode=sam_mode)
transcript_map = collections.defaultdict(set)
sam_transcripts = [x["SN"] for x in sam_file.header["SQ"]]
if gene_map:
for transcript, gene in gene_map.items():
if transcript in sam_transcripts:
transcript_map[gene].add(transcript)
else:
for transcript in sam_transcripts:
transcript_map[transcript].add(transcript)
missing_transcripts = set()
alignments_processed = 0
unmapped = 0
kept = 0
nomatchcb = 0
current_read = 'none_observed_yet'
current_transcript = None
count_this_read = True
transcripts_processed = 0
genes_processed = 0
cells = list(cb_hist.index)
targets_seen = set()
if umi_matrix:
bare_evidence_handle = open(umi_matrix, "w")
bare_evidence_handle.write(",".join(["gene"] + cells) + "\n")
with open(out, "w") as out_handle:
out_handle.write(",".join(["gene"] + cells) + "\n")
for gene, transcripts in transcript_map.items():
for transcript in transcripts:
for aln in sam_file.fetch(transcript):
alignments_processed += 1
if aln.is_unmapped:
unmapped += 1
continue
if gene_tags and not aln.has_tag('GX'):
unmapped += 1
continue
if aln.qname != current_read:
current_read = aln.qname
if subsample and i not in index_filter:
count_this_read = False
continue
else:
count_this_read = True
else:
if not count_this_read:
continue
if parse_tags:
CB = aln.get_tag('CR')
else:
match = parser_re.match(aln.qname)
CB = match.group('CB')
if filter_cb:
if CB not in cb_hist.index:
nomatchcb += 1
continue
if parse_tags:
MB = aln.get_tag('UM')
else:
MB = match.group('MB')
if gene_tags:
target_name = aln.get_tag('GX').split(',')[0]
else:
txid = sam_file.getrname(aln.reference_id)
if gene_map:
if txid in gene_map:
target_name = gene_map[txid]
else:
missing_transcripts.add(txid)
continue
else:
target_name = txid
targets_seen.add(target_name)
# Scale evidence by number of hits
evidence[CB][MB] += weigh_evidence(aln.tags)
bare_evidence[CB] += weigh_evidence(aln.tags)
kept += 1
transcripts_processed += 1
if not transcripts_processed % 1000:
logger.info("%d genes processed." % genes_processed)
logger.info("%d transcripts processed." % transcripts_processed)
logger.info("%d alignments processed." % alignments_processed)
earray = []
for cell in cells:
umis = [1 for _, v in evidence[cell].items() if v >= minevidence]
earray.append(str(sum(umis)))
out_handle.write(",".join([gene] + earray) + "\n")
earray = []
if umi_matrix:
for cell in cells:
earray.append(str(int(bare_evidence[cell])))
bare_evidence_handle.write(",".join([gene] + earray) + "\n")
evidence = collections.defaultdict(lambda: collections.defaultdict(int))
bare_evidence = collections.defaultdict(int)
genes_processed += 1
if umi_matrix:
bare_evidence_handle.close()
# fill dataframe with missing values, sort and output
df = pd.read_csv(out, index_col=0, header=0)
targets = pd.Series(index=set(transcript_map.keys()))
targets = targets.sort_index()
df = df.reindex(targets.index.values, fill_value=0)
df = df.sort_index()
df.to_csv(out)
if umi_matrix:
df = pd.read_csv(umi_matrix, index_col=0, header=0)
df = df.reindex(targets.index.values, fill_value=0)
df = df.sort_index()
df.to_csv(umi_matrix) | []
|
Please provide a description of the function:def sparse(csv, sparse):
''' Convert a CSV file to a sparse matrix with rows and column names
saved as companion files.
'''
import pandas as pd
df = pd.read_csv(csv, index_col=0, header=0)
pd.Series(df.index).to_csv(sparse + ".rownames", index=False)
pd.Series(df.columns.values).to_csv(sparse + ".colnames", index=False)
with open(sparse, "w+b") as out_handle:
scipy.io.mmwrite(out_handle, scipy.sparse.csr_matrix(df)) | []
|
Please provide a description of the function:def cb_histogram(fastq, umi_histogram):
''' Counts the number of reads for each cellular barcode
Expects formatted fastq files.
'''
annotations = detect_fastq_annotations(fastq)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
cb_counter = collections.Counter()
umi_counter = collections.Counter()
for read in read_fastq(fastq):
match = parser_re.search(read).groupdict()
cb = match['CB']
cb_counter[cb] += 1
if umi_histogram:
umi = match['MB']
umi_counter[(cb, umi)] += 1
for bc, count in cb_counter.most_common():
sys.stdout.write('{}\t{}\n'.format(bc, count))
if umi_histogram:
with open(umi_histogram, "w") as umi_handle:
for cbumi, count in umi_counter.most_common():
umi_handle.write('{}\t{}\t{}\n'.format(cbumi[0], cbumi[1], count)) | []
|
Please provide a description of the function:def get_cb_depth_set(cb_histogram, cb_cutoff):
''' Returns a set of barcodes with a minimum number of reads
'''
cb_keep_set = set()
if not cb_histogram:
return cb_keep_set
with read_cbhistogram(cb_histogram) as fh:
cb_map = dict(p.strip().split() for p in fh)
cb_keep_set = set([k for k, v in cb_map.items() if int(v) > cb_cutoff])
logger.info('Keeping %d out of %d cellular barcodes.'
% (len(cb_keep_set), len(cb_map)))
return cb_keep_set | []
|
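get_cb_depth_set expects the two-column, tab-separated histogram that cb_histogram writes (barcode, read count) and keeps only barcodes whose count exceeds the cutoff. A small standalone sketch of the same filtering logic, with made-up file contents:

# contents of a hypothetical cb-histogram.tsv produced by cb_histogram
with open("cb-histogram.tsv", "w") as fh:
    fh.write("ACGTACGT\t120000\nTTTTCCCC\t85000\nGGGGAAAA\t12\n")

cb_cutoff = 1000
with open("cb-histogram.tsv") as fh:
    cb_map = dict(line.strip().split() for line in fh)
cb_keep_set = set(bc for bc, count in cb_map.items() if int(count) > cb_cutoff)
print(cb_keep_set)  # {'ACGTACGT', 'TTTTCCCC'} - the low-count barcode is dropped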
Please provide a description of the function:def guess_depth_cutoff(cb_histogram):
''' Guesses at an appropriate barcode cutoff
'''
with read_cbhistogram(cb_histogram) as fh:
cb_vals = [int(p.strip().split()[1]) for p in fh]
histo = np.histogram(np.log10(cb_vals), bins=50)
vals = histo[0]
edges = histo[1]
mids = np.array([(edges[i] + edges[i+1])/2 for i in range(edges.size - 1)])
wdensity = vals * (10**mids) / sum(vals * (10**mids))
baseline = np.median(wdensity)
wdensity = list(wdensity)
# find highest density in upper half of barcode distribution
peak = wdensity.index(max(wdensity[len(wdensity) // 2:]))  # integer division so the slice works on Python 3
cutoff = None
for index, dens in reversed(list(enumerate(wdensity[1:peak]))):
if dens < 2 * baseline:
cutoff = index
break
if not cutoff:
return None
else:
cutoff = 10**mids[cutoff]
logger.info('Setting barcode cutoff to %d' % cutoff)
return cutoff | []
|
Please provide a description of the function:def cb_filter(fastq, bc1, bc2, bc3, cores, nedit):
''' Filters reads with non-matching barcodes
Expects formatted fastq files.
'''
with open_gzipsafe(bc1) as bc1_fh:
bc1 = set(cb.strip() for cb in bc1_fh)
if bc2:
with open_gzipsafe(bc2) as bc2_fh:
bc2 = set(cb.strip() for cb in bc2_fh)
if bc3:
with open_gzipsafe(bc3) as bc3_fh:
bc3 = set(cb.strip() for cb in bc3_fh)
annotations = detect_fastq_annotations(fastq)
re_string = construct_transformed_regex(annotations)
if nedit == 0:
filter_cb = partial(exact_barcode_filter, bc1=bc1, bc2=bc2, bc3=bc3,
re_string=re_string)
else:
bc1hash = MutationHash(bc1, nedit)
bc2hash = None
bc3hash = None
if bc2:
bc2hash = MutationHash(bc2, nedit)
if bc3:
bc3hash = MutationHash(bc3, nedit)
filter_cb = partial(correcting_barcode_filter, bc1hash=bc1hash,
bc2hash=bc2hash, bc3hash=bc3hash, re_string=re_string)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(filter_cb, list(bigchunk)):
for read in chunk:
sys.stdout.write(read) | []
|
Please provide a description of the function:def sb_filter(fastq, bc, cores, nedit):
''' Filters reads with non-matching sample barcodes
Expects formatted fastq files.
'''
barcodes = set(sb.strip() for sb in bc)
if nedit == 0:
filter_sb = partial(exact_sample_filter2, barcodes=barcodes)
else:
barcodehash = MutationHash(barcodes, nedit)
filter_sb = partial(correcting_sample_filter2, barcodehash=barcodehash)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(filter_sb, list(bigchunk)):
for read in chunk:
sys.stdout.write(read) | []
|
Please provide a description of the function:def mb_filter(fastq, cores):
''' Filters umis with non-ACGT bases
Expects formatted fastq files.
'''
filter_mb = partial(umi_filter)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(filter_mb, list(bigchunk)):
for read in chunk:
sys.stdout.write(read) | []
|
Please provide a description of the function:def add_uid(fastq, cores):
''' Adds UID:[samplebc cellbc umi] to readname for umi-tools deduplication
Expects formatted fastq files with correct sample and cell barcodes.
'''
uids = partial(append_uids)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(uids, list(bigchunk)):
for read in chunk:
sys.stdout.write(read) | []
|
Please provide a description of the function:def kallisto(fastq, out_dir, cb_histogram, cb_cutoff):
''' Convert fastqtransformed file to output format compatible with
kallisto.
'''
parser_re = re.compile('(.*):CELL_(?P<CB>.*):UMI_(?P<UMI>.*)\\n(.*)\\n\\+\\n(.*)\\n')
if fastq.endswith('gz'):
fastq_fh = gzip.GzipFile(fileobj=open(fastq))
elif fastq == "-":
fastq_fh = sys.stdin
else:
fastq_fh = open(fastq)
cb_depth_set = get_cb_depth_set(cb_histogram, cb_cutoff)
cb_set = set()
cb_batch = collections.defaultdict(list)
parsed = 0
for read in stream_fastq(fastq_fh):
match = parser_re.search(read).groupdict()
umi = match['UMI']
cb = match['CB']
if cb_depth_set and cb not in cb_depth_set:
continue
parsed += 1
cb_set.add(cb)
cb_batch[cb].append((read, umi))
# write in batches to avoid opening up file handles repeatedly
if not parsed % 10000000:
for cb, chunk in cb_batch.items():
write_kallisto_chunk(out_dir, cb, chunk)
cb_batch = collections.defaultdict(list)
for cb, chunk in cb_batch.items():
write_kallisto_chunk(out_dir, cb, chunk)
with open(os.path.join(out_dir, "barcodes.batch"), "w") as out_handle:
out_handle.write("#id umi-file file-1\n")
batchformat = "{cb} {cb}.umi {cb}.fq\n"
for cb in cb_set:
out_handle.write(batchformat.format(**locals())) | []
|
Please provide a description of the function:def bamtag(sam):
''' Convert a BAM/SAM with fastqtransformed read names to have UMI and
cellular barcode tags
'''
from pysam import AlignmentFile
start_time = time.time()
sam_file = open_bamfile(sam)
out_file = AlignmentFile("-", "wh", template=sam_file)
track = sam_file.fetch(until_eof=True)
# peek at first alignment to determine the annotations
if is_python3():
queryalignment = next(track)
else:
queryalignment = track.next()
annotations = detect_alignment_annotations(queryalignment)
track = itertools.chain([queryalignment], track)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
for count, aln in enumerate(track, start=1):
if count and not count % 1000000:
logger.info("Processed %d alignments." % count)
match = parser_re.match(aln.qname)
tags = aln.tags
if "cellular" in annotations:
aln.tags += [('XC', match.group('CB'))]
if "molecular" in annotations:
aln.tags += [('RX', match.group('MB'))]
if "sample" in annotations:
aln.tags += [('XS', match.group('SB'))]
out_file.write(aln)
total_time = time.time() - start_time
logger.info('BAM tag conversion done - {:.3}s, {:,} alns/min'.format(total_time, int(60. * count / total_time)))
logger.info("Processed %d alignments." % count) | []
|
Please provide a description of the function:def demultiplex_samples(fastq, out_dir, nedit, barcodes):
''' Demultiplex a fastqtransformed FASTQ file into a FASTQ file for
each sample.
'''
annotations = detect_fastq_annotations(fastq)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
if barcodes:
barcodes = set(barcode.strip() for barcode in barcodes)
else:
barcodes = set()
if nedit == 0:
filter_bc = partial(exact_sample_filter, barcodes=barcodes)
else:
barcodehash = MutationHash(barcodes, nedit)
filter_bc = partial(correcting_sample_filter, barcodehash=barcodehash)
sample_set = set()
batch = collections.defaultdict(list)
parsed = 0
safe_makedir(out_dir)
for read in read_fastq(fastq):
parsed += 1
read = filter_bc(read)
if not read:
continue
match = parser_re.search(read).groupdict()
sample = match['SB']
sample_set.add(sample)
batch[sample].append(read)
# write in batches to avoid opening up file handles repeatedly
if not parsed % 10000000:
for sample, reads in batch.items():
out_file = os.path.join(out_dir, sample + ".fq")
with open(out_file, "a") as out_handle:
for read in reads:
fixed = filter_bc(read)
if fixed:
out_handle.write(fixed)
batch = collections.defaultdict(list)
for sample, reads in batch.items():
out_file = os.path.join(out_dir, sample + ".fq")
with open(out_file, "a") as out_handle:
for read in reads:
fixed = filter_bc(read)
if fixed:
out_handle.write(read) | []
|
Please provide a description of the function:def demultiplex_cells(fastq, out_dir, readnumber, prefix, cb_histogram,
cb_cutoff):
''' Demultiplex a fastqtransformed FASTQ file into a FASTQ file for
each cell.
'''
annotations = detect_fastq_annotations(fastq)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
readstring = "" if not readnumber else "_R{}".format(readnumber)
filestring = "{prefix}{sample}{readstring}.fq"
cb_set = set()
if cb_histogram:
cb_set = get_cb_depth_set(cb_histogram, cb_cutoff)
sample_set = set()
batch = collections.defaultdict(list)
parsed = 0
safe_makedir(out_dir)
for read in read_fastq(fastq):
parsed += 1
match = parser_re.search(read).groupdict()
sample = match['CB']
if cb_set and sample not in cb_set:
continue
sample_set.add(sample)
batch[sample].append(read)
# write in batches to avoid opening up file handles repeatedly
if not parsed % 10000000:
for sample, reads in batch.items():
out_file = os.path.join(out_dir, filestring.format(**locals()))
with open(out_file, "a") as out_handle:
for read in reads:
out_handle.write(read)
batch = collections.defaultdict(list)
for sample, reads in batch.items():
out_file = os.path.join(out_dir, filestring.format(**locals()))
with open(out_file, "a") as out_handle:
for read in reads:
out_handle.write(read) | []
|
Please provide a description of the function:def subset_bamfile(sam, barcodes):
from pysam import AlignmentFile
start_time = time.time()
sam_file = open_bamfile(sam)
out_file = AlignmentFile("-", "wh", template=sam_file)
track = sam_file.fetch(until_eof=True)
# peek at first alignment to determine the annotations
queryalignment = next(track)  # use builtin next() so this works on Python 2 and 3
annotations = detect_alignment_annotations(queryalignment)
track = itertools.chain([queryalignment], track)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
barcodes = set(barcode.strip() for barcode in barcodes)
for count, aln in enumerate(track, start=1):
if count and not count % 1000000:
logger.info("Processed %d alignments." % count)
match = parser_re.match(aln.qname)
tags = aln.tags
if "cellular" in annotations:
cb = match.group('CB')
if cb in barcodes:
out_file.write(aln) | [
"\n Subset a SAM/BAM file, keeping only alignments from given\n cellular barcodes\n "
]
|
Please provide a description of the function:def array_type(data_types, field):
from sqlalchemy.dialects import postgresql
internal_type = field.base_field.get_internal_type()
# currently no support for multi-dimensional arrays
if internal_type in data_types and internal_type != 'ArrayField':
sub_type = data_types[internal_type](field)
if not isinstance(sub_type, (list, tuple)):
sub_type = [sub_type]
else:
raise RuntimeError('Unsupported array element type')
return postgresql.ARRAY(sub_type) | [
"\n Allows conversion of Django ArrayField to SQLAlchemy Array.\n Takes care of mapping the type of the array element.\n "
]
|
Please provide a description of the function:def setup_logger(logger, logfile): # noqa
# type: (logger, str) -> None
global _REGISTERED_LOGGER_HANDLERS
logger.setLevel(logging.DEBUG)
if is_none_or_empty(logfile):
handler = logging.StreamHandler()
else:
handler = logging.FileHandler(logfile, encoding='utf-8')
logging.getLogger().addHandler(handler)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
formatter.default_msec_format = '%s.%03d'
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
_REGISTERED_LOGGER_HANDLERS.append(handler) | [
"Set up logger"
]
|
Please provide a description of the function:def set_verbose_logger_handlers(): # noqa
# type: (None) -> None
global _REGISTERED_LOGGER_HANDLERS
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(name)s:%(funcName)s:%(lineno)d '
'%(message)s')
formatter.default_msec_format = '%s.%03d'
for handler in _REGISTERED_LOGGER_HANDLERS:
handler.setFormatter(formatter) | [
"Set logger handler formatters to more detail"
]
|
Please provide a description of the function:def join_thread(thr):
# type: (threading.Thread) -> None
if on_python2():
while True:
thr.join(timeout=1)
if not thr.isAlive():
break
else:
thr.join() | [
"Join a thread\n :type threading.Thread thr: thread to join\n "
]
|
Please provide a description of the function:def merge_dict(dict1, dict2):
# type: (dict, dict) -> dict
if not isinstance(dict1, dict) or not isinstance(dict2, dict):
raise ValueError('dict1 or dict2 is not a dictionary')
result = copy.deepcopy(dict1)
for k, v in dict2.items():
if k in result and isinstance(result[k], dict):
result[k] = merge_dict(result[k], v)
else:
result[k] = copy.deepcopy(v)
return result | [
"Recursively merge dictionaries: dict2 on to dict1. This differs\n from dict.update() in that values that are dicts are recursively merged.\n Note that only dict value types are merged, not lists, etc.\n\n :param dict dict1: dictionary to merge to\n :param dict dict2: dictionary to merge with\n :rtype: dict\n :return: merged dictionary\n "
]
|
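merge_dict recursively merges nested dict values instead of replacing them wholesale, which is what distinguishes it from dict.update(). An example with made-up configuration keys (assuming the merge_dict above):

defaults = {"pool": {"vm_count": 1, "publisher": "Canonical"}, "verbose": False}
overrides = {"pool": {"vm_count": 4}, "verbose": True}
print(merge_dict(defaults, overrides))
# {'pool': {'vm_count': 4, 'publisher': 'Canonical'}, 'verbose': True}
# dict.update() would have replaced the whole 'pool' sub-dict instead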
Please provide a description of the function:def datetime_from_timestamp(ts, tz=None, as_utc=False):
# type: (float, dateutil.tz, bool) -> datetime.datetime
if tz is None:
tz = dateutil.tz.tzlocal()
dt = datetime.datetime.fromtimestamp(ts, tz=tz)
if as_utc:
return dt.astimezone(tz=dateutil.tz.tzutc())
else:
return dt | [
"Convert a timestamp into datetime with offset\n :param float ts: timestamp\n :param dateutil.tz tz: time zone or local tz if not specified\n :param bool as_utc: convert datetime to UTC\n :rtype: datetime.datetime\n :return: converted timestamp to datetime\n "
]
|
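datetime_from_timestamp attaches a timezone to a POSIX timestamp and can optionally re-express the same instant in UTC. For example (assuming the function above):

ts = 1500000000.0  # 2017-07-14 02:40:00 UTC
local_dt = datetime_from_timestamp(ts)             # local wall-clock time with offset
utc_dt = datetime_from_timestamp(ts, as_utc=True)  # same instant, expressed in UTC
print(utc_dt.isoformat())   # 2017-07-14T02:40:00+00:00
print(local_dt == utc_dt)   # True - equal instants, different representations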
Please provide a description of the function:def scantree(path):
# type: (str) -> os.DirEntry
for entry in scandir(path):
if entry.is_dir(follow_symlinks=True):
# due to python2 compat, cannot use yield from here
for t in scantree(entry.path):
yield t
else:
yield entry | [
"Recursively scan a directory tree\n :param str path: path to scan\n :rtype: DirEntry\n :return: DirEntry via generator\n "
]
|
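scantree yields an os.DirEntry for every regular file under a path, recursing into subdirectories (and following symlinked directories) rather than yielding them. A usage sketch (assuming the generator above):

total_bytes = 0
for entry in scantree("."):
    # only files are yielded; directories are descended into
    total_bytes += entry.stat().st_size
print("files under '.': {} bytes".format(total_bytes))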